x86: Fix various typos in comments, take #2

Fix another ~42 single-word typos in arch/x86/ code comments that were
missed in the first pass, in particular in .S files.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: linux-kernel@vger.kernel.org
Author: Ingo Molnar
Date:   2021-03-21 22:28:53 +01:00
Commit: 163b099146 (parent: c681df88dc)
41 changed files with 42 additions and 42 deletions

@@ -5,7 +5,7 @@
  * Early support for invoking 32-bit EFI services from a 64-bit kernel.
  *
  * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT before we make EFI serivce calls,
+ * restore the firmware's 32-bit GDT before we make EFI service calls,
  * since the firmware's 32-bit IDT is still currently installed and it
  * needs to be able to service interrupts.
  *

@@ -231,7 +231,7 @@ SYM_FUNC_START(startup_32)
 /*
  * Setup for the jump to 64bit mode
  *
- * When the jump is performend we will be in long mode but
+ * When the jump is performed we will be in long mode but
  * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
  * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
  * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
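
For context: the jump described above works because the new code segment has the descriptor L flag set, while a firmware-style 32-bit segment has D set instead. A small userspace sketch (illustration only; the descriptor constants are the conventional flat-segment encodings, not values copied from this file) that decodes the two flags:

#include <stdio.h>
#include <stdint.h>

static void decode_cs(uint64_t desc)
{
	int l = (desc >> 53) & 1;	/* 64-bit ("long") code segment      */
	int d = (desc >> 54) & 1;	/* default operand size (1 = 32-bit) */

	printf("desc=%#018llx CS.L=%d CS.D=%d -> %s code segment\n",
	       (unsigned long long)desc, l, d,
	       l ? "64-bit" : (d ? "32-bit" : "16-bit"));
}

int main(void)
{
	decode_cs(0x00cf9a000000ffffULL);	/* flat 32-bit (compatibility) CS */
	decode_cs(0x00af9a000000ffffULL);	/* flat 64-bit, __KERNEL_CS-style */
	return 0;
}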

@@ -24,7 +24,7 @@
 /*
  * Copyright 2012 Xyratex Technology Limited
  *
- * Wrappers for kernel crypto shash api to pclmulqdq crc32 imlementation.
+ * Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
  */
 #include <linux/init.h>
 #include <linux/module.h>

@@ -88,7 +88,7 @@
 /*
  * Combined G1 & G2 function. Reordered with help of rotates to have moves
- * at begining.
+ * at beginning.
  */
 #define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \
 /* G1,1 && G2,1 */ \

@@ -209,7 +209,7 @@
  *
  * Lets build a 5 entry IRET frame after that, such that struct pt_regs
  * is complete and in particular regs->sp is correct. This gives us
- * the original 6 enties as gap:
+ * the original 6 entries as gap:
  *
  * 14*4(%esp) - <previous context>
  * 13*4(%esp) - gap / flags

@@ -511,7 +511,7 @@ SYM_CODE_START(\asmsym)
 /*
  * No need to switch back to the IST stack. The current stack is either
  * identical to the stack in the IRET frame or the VC fall-back stack,
- * so it is definitly mapped even with PTI enabled.
+ * so it is definitely mapped even with PTI enabled.
  */
 jmp paranoid_exit

@@ -218,7 +218,7 @@ int main(int argc, char **argv)
 /*
  * Figure out the struct name. If we're writing to a .so file,
- * generate raw output insted.
+ * generate raw output instead.
  */
 name = strdup(argv[3]);
 namelen = strlen(name);

@@ -29,7 +29,7 @@ __kernel_vsyscall:
  * anyone with an AMD CPU, for example). Nonetheless, we try to keep
  * it working approximately as well as it ever worked.
  *
- * This link may eludicate some of the history:
+ * This link may elucidate some of the history:
  * https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
  * personally, I find it hard to understand what's going on there.
  *

@@ -358,7 +358,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 mmap_write_lock(mm);
 /*
  * Check if we have already mapped vdso blob - fail to prevent
- * abusing from userspace install_speciall_mapping, which may
+ * abusing from userspace install_special_mapping, which may
  * not do accounting and rlimit right.
  * We could search vma near context.vdso, but it's a slowpath,
  * so let's explicitly check all VMAs to be completely sure.

@@ -137,7 +137,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
 /*
  * If the return from callback is zero or negative, return immediately,
- * else re-execute ENCLU with the postive return value interpreted as
+ * else re-execute ENCLU with the positive return value interpreted as
  * the requested ENCLU function.
  */
 cmp $0, %eax
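
The calling convention in this comment is easier to follow in C than in vDSO assembly. A hedged userspace sketch, with run_enclu() and handler() as invented stand-ins rather than the real SGX interfaces:

#include <stdio.h>

/* Invented stand-in for executing an ENCLU leaf (2 = EENTER, 3 = ERESUME). */
static int run_enclu(int leaf) { printf("ENCLU leaf %d\n", leaf); return 0; }

/* Invented user callback: first call asks for ERESUME, then gives up. */
static int handler(int enclu_ret)
{
	static int calls;
	return calls++ ? 0 : 3;
}

static int enter_enclave(int leaf)
{
	for (;;) {
		int ret = handler(run_enclu(leaf));

		if (ret <= 0)
			return ret;	/* zero or negative: return immediately */
		leaf = ret;		/* positive: the requested ENCLU function */
	}
}

int main(void) { return enter_enclave(2); }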

@@ -594,7 +594,7 @@ static __init int bts_init(void)
  * we cannot use the user mapping since it will not be available
  * if we're not running the owning process.
  *
- * With PTI we can't use the kernal map either, because its not
+ * With PTI we can't use the kernel map either, because its not
  * there when we run userspace.
  *
  * For now, disable this driver when using PTI.

@@ -1313,7 +1313,7 @@ static __initconst const struct x86_pmu p4_pmu = {
 .get_event_constraints = x86_get_event_constraints,
 /*
  * IF HT disabled we may need to use all
- * ARCH_P4_MAX_CCCR counters simulaneously
+ * ARCH_P4_MAX_CCCR counters simultaneously
  * though leave it restricted at moment assuming
  * HT is on
  */

@@ -9,7 +9,7 @@
  * Functions to keep the agpgart mappings coherent with the MMU. The
  * GART gives the CPU a physical alias of pages in memory. The alias
  * region is mapped uncacheable. Make sure there are no conflicting
- * mappings with different cachability attributes for the same
+ * mappings with different cacheability attributes for the same
  * page. This avoids data corruption on some CPUs.
  */

@@ -3,7 +3,7 @@
 #define _ASM_X86_INTEL_PT_H
 #define PT_CPUID_LEAVES 2
- #define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */
+ #define PT_CPUID_REGS_NUM 4 /* number of registers (eax, ebx, ecx, edx) */
 enum pt_capabilities {
 PT_CAP_max_subleaf = 0,
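
PT_CPUID_LEAVES and PT_CPUID_REGS_NUM describe the 2 x 4 block of CPUID output (leaf 0x14) that enumerates Intel PT. A userspace sketch, assuming a toolchain with <cpuid.h> and a CPU that reports leaf 0x14:

#include <stdio.h>
#include <cpuid.h>

#define PT_CPUID_LEAVES   2
#define PT_CPUID_REGS_NUM 4	/* eax, ebx, ecx, edx */

int main(void)
{
	unsigned int r[PT_CPUID_REGS_NUM];

	for (unsigned int sub = 0; sub < PT_CPUID_LEAVES; sub++) {
		if (!__get_cpuid_count(0x14, sub, &r[0], &r[1], &r[2], &r[3]))
			return 1;	/* CPUID leaf 0x14 not available */
		printf("leaf 0x14 subleaf %u: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
		       sub, r[0], r[1], r[2], r[3]);
	}
	return 0;
}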

@@ -8,7 +8,7 @@
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
  * address range. The attributes include:
- * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
+ * Cacheability : UnCached, WriteCombining, WriteThrough, WriteBack
  * Executability : eXecutable, NoteXecutable
  * Read/Write : ReadOnly, ReadWrite
  * Presence : NotPresent
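
A hedged sketch of the call shape of this API from module code (illustration only; whether a given set_memory_* helper is exported to modules depends on the kernel version):

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

static void *buf;

static int __init setmem_demo_init(void)
{
	buf = vmalloc(PAGE_SIZE);
	if (!buf)
		return -ENOMEM;

	/* one page starting at buf: drop the ReadWrite attribute to ReadOnly */
	set_memory_ro((unsigned long)buf, 1);
	return 0;
}

static void __exit setmem_demo_exit(void)
{
	/* restore ReadWrite before freeing the page */
	set_memory_rw((unsigned long)buf, 1);
	vfree(buf);
}

module_init(setmem_demo_init);
module_exit(setmem_demo_exit);
MODULE_LICENSE("GPL");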

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Shared support code for AMD K8 northbridges and derivates.
+ * Shared support code for AMD K8 northbridges and derivatives.
  * Copyright 2006 Andi Kleen, SUSE Labs.
  */

@@ -1025,7 +1025,7 @@ static int apm_enable_power_management(int enable)
  * status which gives the rough battery status, and current power
  * source. The bat value returned give an estimate as a percentage
  * of life and a status value for the battery. The estimated life
- * if reported is a lifetime in secodnds/minutes at current power
+ * if reported is a lifetime in seconds/minutes at current power
  * consumption.
  */

@@ -301,7 +301,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
  * The operating system must reload CR3 to cause the TLB to be flushed"
  *
  * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
- * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
+ * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
  * to be modified.
  */
 if (c->x86 == 5 && c->x86_model == 9) {

@@ -142,7 +142,7 @@ static struct severity {
 MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
 ),
 MCESEV(
- KEEP, "Non signalled machine check",
+ KEEP, "Non signaled machine check",
 SER, BITCLR(MCI_STATUS_S)
 ),

@@ -799,7 +799,7 @@ void mtrr_ap_init(void)
  *
  * This routine is called in two cases:
  *
- * 1. very earily time of software resume, when there absolutely
+ * 1. very early time of software resume, when there absolutely
  * isn't mtrr entry changes;
  *
  * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug

@@ -397,7 +397,7 @@ void mon_event_count(void *info)
  * timer. Having 1s interval makes the calculation of bandwidth simpler.
  *
  * Although MBA's goal is to restrict the bandwidth to a maximum, there may
- * be a need to increase the bandwidth to avoid uncecessarily restricting
+ * be a need to increase the bandwidth to avoid unnecessarily restricting
  * the L2 <-> L3 traffic.
  *
  * Since MBA controls the L2 external bandwidth where as MBM measures the
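
With the fixed 1s interval, the bandwidth figure is simply the difference of two MBM byte counters. A trivial userspace sketch with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long prev_bytes = 1200ULL * 1024 * 1024;	/* previous read    */
	unsigned long long curr_bytes = 1500ULL * 1024 * 1024;	/* one second later */
	unsigned long long bw = curr_bytes - prev_bytes;	/* bytes per second */

	printf("memory bandwidth: %llu MB/s\n", bw >> 20);
	return 0;
}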

@@ -2555,7 +2555,7 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
 /*
  * This creates a directory mon_data which contains the monitored data.
  *
- * mon_data has one directory for each domain whic are named
+ * mon_data has one directory for each domain which are named
  * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
  * with L3 domain looks as below:
  * ./mon_data:

@@ -107,7 +107,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
  * - Write protect disabled
  * - No task switch
  * - Don't do FP software emulation.
- * - Proctected mode enabled
+ * - Protected mode enabled
  */
 movl %cr0, %eax
 andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
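
The CR0 flags named in that mask sit at fixed architectural bit positions. A userspace sketch that only prints the cleared mask (defines mirror the kernel's names; positions per the SDM):

#include <stdio.h>

#define X86_CR0_PE (1UL << 0)	/* Protection Enable */
#define X86_CR0_EM (1UL << 2)	/* x87 EMulation     */
#define X86_CR0_TS (1UL << 3)	/* Task Switched     */
#define X86_CR0_WP (1UL << 16)	/* Write Protect     */
#define X86_CR0_AM (1UL << 18)	/* Alignment Mask    */
#define X86_CR0_PG (1UL << 31)	/* PaGing            */

int main(void)
{
	unsigned long cleared = X86_CR0_PG | X86_CR0_AM | X86_CR0_WP |
				X86_CR0_TS | X86_CR0_EM;

	printf("CR0 bits cleared before the identity-mapped jump: %#010lx\n", cleared);
	printf("CR0.PE (protected mode) stays set:                %#010lx\n", X86_CR0_PE);
	return 0;
}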

@@ -121,7 +121,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
  * - Write protect disabled
  * - No task switch
  * - Don't do FP software emulation.
- * - Proctected mode enabled
+ * - Protected mode enabled
  */
 movq %cr0, %rax
 andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax

@@ -204,7 +204,7 @@ static void native_stop_other_cpus(int wait)
 }
 /*
  * Don't wait longer than 10 ms if the caller didn't
- * reqeust it. If wait is true, the machine hangs here if
+ * request it. If wait is true, the machine hangs here if
  * one or more CPUs do not reach shutdown state.
  */
 timeout = USEC_PER_MSEC * 10;

@@ -472,7 +472,7 @@ retry:
 /*
  * Add the result to the previous adjustment value.
  *
- * The adjustement value is slightly off by the overhead of the
+ * The adjustment value is slightly off by the overhead of the
  * sync mechanism (observed values are ~200 TSC cycles), but this
  * really depends on CPU, node distance and frequency. So
  * compensating for this is hard to get right. Experiments show
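
To put the ~200 TSC cycles of sync overhead in perspective, a back-of-the-envelope sketch with an assumed 2.5 GHz TSC:

#include <stdio.h>

int main(void)
{
	double tsc_hz = 2.5e9;		/* assumed TSC frequency         */
	double overhead_cycles = 200.0;	/* figure quoted in the comment  */

	printf("~%.0f ns of overhead per sync round\n",
	       overhead_cycles / tsc_hz * 1e9);
	return 0;
}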

@@ -272,7 +272,7 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
  * by whether the operand is a register or a memory location.
  * If operand is a register, return as many bytes as the operand
  * size. If operand is memory, return only the two least
- * siginificant bytes.
+ * significant bytes.
  */
 if (X86_MODRM_MOD(insn->modrm.value) == 3)
 *data_size = insn->opnd_bytes;
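
The register-vs-memory rule above, restated as a userspace sketch (the helper name is invented; X86_MODRM_MOD is re-derived locally):

#include <stdio.h>

#define X86_MODRM_MOD(modrm) (((modrm) >> 6) & 3)

/* Invented helper: how many bytes of the result land in the destination. */
static int umip_data_size(unsigned char modrm, int opnd_bytes)
{
	return X86_MODRM_MOD(modrm) == 3 ? opnd_bytes : 2;
}

int main(void)
{
	printf("register operand (mod=3), 32-bit insn: %d bytes\n",
	       umip_data_size(0xc0, 4));
	printf("memory operand   (mod=0), 32-bit insn: %d bytes\n",
	       umip_data_size(0x00, 4));
	return 0;
}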

@@ -727,7 +727,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 struct amd_svm_iommu_ir *ir;
 /**
- * In some cases, the existing irte is updaed and re-set,
+ * In some cases, the existing irte is updated and re-set,
  * so we need to check here if it's already been * added
  * to the ir_list.
  */

@@ -3537,7 +3537,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
  * snapshot restore (migration).
  *
  * In this flow, it is assumed that vmcs12 cache was
- * trasferred as part of captured nVMX state and should
+ * transferred as part of captured nVMX state and should
  * therefore not be read from guest memory (which may not
  * exist on destination host yet).
  */

@@ -964,7 +964,7 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
 /* The return value (in eax) is zero if the result is exact,
 if bits are changed due to rounding, truncation, etc, then
 a non-zero value is returned */
- /* Overflow is signalled by a non-zero return value (in eax).
+ /* Overflow is signaled by a non-zero return value (in eax).
 In the case of overflow, the returned significand always has the
 largest possible value */
 int FPU_round_to_int(FPU_REG *r, u_char tag)

@@ -575,7 +575,7 @@ Normalise_result:
 #ifdef PECULIAR_486
 /*
  * This implements a special feature of 80486 behaviour.
- * Underflow will be signalled even if the number is
+ * Underflow will be signaled even if the number is
  * not a denormal after rounding.
  * This difference occurs only for masked underflow, and not
  * in the unmasked case.

@@ -1497,7 +1497,7 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
  * userspace task is trying to access some valid (from guest's point of
  * view) memory which is not currently mapped by the host (e.g. the
  * memory is swapped out). Note, the corresponding "page ready" event
- * which is injected when the memory becomes available, is delived via
+ * which is injected when the memory becomes available, is delivered via
  * an interrupt mechanism and not a #PF exception
  * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
  *

@@ -756,7 +756,7 @@ void __init init_mem_mapping(void)
 #ifdef CONFIG_X86_64
 if (max_pfn > max_low_pfn) {
- /* can we preseve max_low_pfn ?*/
+ /* can we preserve max_low_pfn ?*/
 max_low_pfn = max_pfn;
 }
 #else

@@ -128,7 +128,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
 /*
  * Called from the FPU code when creating a fresh set of FPU
  * registers. This is called from a very specific context where
- * we know the FPU regstiers are safe for use and we can use PKRU
+ * we know the FPU registers are safe for use and we can use PKRU
  * directly.
  */
 void copy_init_pkru_to_fpregs(void)
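
init_pkru_value in the hunk context is built from two bits per protection key (Access-Disable and Write-Disable). A userspace sketch of that layout; the PKRU_* defines mirror the kernel's as best I recall:

#include <stdio.h>

#define PKRU_BITS_PER_PKEY 2
#define PKRU_AD_BIT 0x1u	/* Access-Disable */
#define PKRU_WD_BIT 0x2u	/* Write-Disable  */
#define PKRU_AD_KEY(pkey) (PKRU_AD_BIT << ((pkey) * PKRU_BITS_PER_PKEY))

int main(void)
{
	unsigned int pkru = 0;

	/* deny access for every key except the default key 0 */
	for (int pkey = 1; pkey < 16; pkey++)
		pkru |= PKRU_AD_KEY(pkey);

	printf("init PKRU value: %#010x\n", pkru);	/* prints 0x55555554 */
	return 0;
}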

@@ -441,7 +441,7 @@ void __init efi_free_boot_services(void)
  * 1.4.4 with SGX enabled booting Linux via Fedora 24's
  * grub2-efi on a hard disk. (And no, I don't know why
  * this happened, but Linux should still try to boot rather
- * panicing early.)
+ * panicking early.)
  */
 rm_size = real_mode_size_needed();
 if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {

@@ -27,7 +27,7 @@ static bool lid_wake_on_close;
  * wake-on-close. This is implemented as standard by the XO-1.5 DSDT.
  *
  * We provide here a sysfs attribute that will additionally enable
- * wake-on-close behavior. This is useful (e.g.) when we oportunistically
+ * wake-on-close behavior. This is useful (e.g.) when we opportunistically
  * suspend with the display running; if the lid is then closed, we want to
  * wake up to turn the display off.
  *

@@ -131,7 +131,7 @@ void * __init prom_early_alloc(unsigned long size)
 const size_t chunk_size = max(PAGE_SIZE, size);
 /*
- * To mimimize the number of allocations, grab at least
+ * To minimize the number of allocations, grab at least
  * PAGE_SIZE of memory (that's an arbitrary choice that's
  * fast enough on the platforms we care about while minimizing
  * wasted bootmem) and hand off chunks of it to callers.
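
The strategy described above is a simple bump allocator over page-sized chunks. A userspace sketch with calloc() standing in for the early bootmem allocation:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static char *chunk;
static size_t chunk_used, chunk_len;

/* Hand out pieces of a page-sized (or larger) chunk, one backing allocation per chunk. */
static void *early_alloc(size_t size)
{
	if (!chunk || chunk_used + size > chunk_len) {
		chunk_len = size > PAGE_SIZE ? size : PAGE_SIZE;
		chunk = calloc(1, chunk_len);	/* stand-in for the bootmem call */
		chunk_used = 0;
		if (!chunk)
			return NULL;
	}
	chunk_used += size;
	return chunk + chunk_used - size;
}

int main(void)
{
	char *a = early_alloc(32);
	char *b = early_alloc(64);

	printf("two allocations, %td bytes apart, out of one chunk\n", b - a);
	return 0;
}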

@@ -321,7 +321,7 @@ int hibernate_resume_nonboot_cpu_disable(void)
 /*
  * When bsp_check() is called in hibernate and suspend, cpu hotplug
- * is disabled already. So it's unnessary to handle race condition between
+ * is disabled already. So it's unnecessary to handle race condition between
  * cpumask query and cpu hotplug.
  */
 static int bsp_check(void)

@@ -103,7 +103,7 @@ static void __init setup_real_mode(void)
 *ptr += phys_base;
 }
- /* Must be perfomed *after* relocation. */
+ /* Must be performed *after* relocation. */
 trampoline_header = (struct trampoline_header *)
 __va(real_mode_header->trampoline_header);

@@ -2410,7 +2410,7 @@ int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
 rmd.prot = prot;
 /*
  * We use the err_ptr to indicate if there we are doing a contiguous
- * mapping or a discontigious mapping.
+ * mapping or a discontiguous mapping.
  */
 rmd.contiguous = !err_ptr;
 rmd.no_translate = no_translate;