// SPDX-License-Identifier: GPL-2.0-only
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/hpet.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#include <asm/hpet.h>
#include <asm/time.h>

#undef pr_fmt
#define pr_fmt(fmt) "hpet: " fmt

struct hpet_dev {
	struct clock_event_device	evt;
	unsigned int			num;
	int				cpu;
	unsigned int			irq;
	unsigned int			flags;
	char				name[10];
};

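/*
 * Bookkeeping for the HPET block and its individual channels: each channel
 * records its number, IRQ routing, usage mode and the boot configuration,
 * so that hpet_disable() can restore the firmware state later.
 */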
enum hpet_mode {
	HPET_MODE_UNUSED,
	HPET_MODE_LEGACY,
	HPET_MODE_CLOCKEVT,
	HPET_MODE_DEVICE,
};

struct hpet_channel {
	unsigned int		num;
	unsigned int		irq;
	enum hpet_mode		mode;
	unsigned int		boot_cfg;
};

struct hpet_base {
	unsigned int		nr_channels;
	unsigned int		boot_cfg;
	struct hpet_channel	*channels;
};

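/*
 * HPET_MASK is the 32 bit counter mask used by the clocksource. The
 * HPET_DEV_* bits are per hpet_dev flags used by the MSI/per-CPU timer
 * code below. HPET_MIN_PROG_DELTA (1.5 * HPET_MIN_CYCLES) is the minimum
 * delta handed to the clockevents core; see the comment in hpet_next_event().
 */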
#define HPET_MASK			CLOCKSOURCE_MASK(32)

#define HPET_DEV_USED_BIT		2
#define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID			0x8
#define HPET_DEV_FSB_CAP		0x1000
#define HPET_DEV_PERI_CAP		0x2000

#define HPET_MIN_CYCLES			128
#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long				hpet_address;
u8					hpet_blockid; /* OS timer block num */
bool					hpet_msi_disable;

#ifdef CONFIG_PCI_MSI
static struct hpet_dev			*hpet_devs;
static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct irq_domain		*hpet_domain;
#endif

static void __iomem			*hpet_virt_address;
static struct hpet_base			hpet_base;

static bool				hpet_legacy_int_enabled;
static unsigned long			hpet_freq;
bool					boot_hpet_disable;
bool					hpet_force_user;
static bool				hpet_verbose;
static struct clock_event_device	hpet_clockevent;

static inline
struct hpet_dev *clockevent_to_channel(struct clock_event_device *evt)
{
	return container_of(evt, struct hpet_dev, evt);
}

inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}

static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 */
static int __init hpet_setup(char *str)
{
	while (str) {
		char *next = strchr(str, ',');

		if (next)
			*next++ = 0;
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = true;
		if (!strncmp("force", str, 5))
			hpet_force_user = true;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = true;

		str = next;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
	boot_hpet_disable = true;
	return 1;
}
__setup("nohpet", disable_hpet);

static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/**
 * is_hpet_enabled - Check whether the legacy HPET timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

static void _hpet_print_config(const char *function, int line)
{
	u32 i, id, period, cfg, status, channels, l, h;

	pr_info("%s(%d):\n", function, line);

	id = hpet_readl(HPET_ID);
	period = hpet_readl(HPET_PERIOD);
	pr_info("ID: 0x%x, PERIOD: 0x%x\n", id, period);

	cfg = hpet_readl(HPET_CFG);
	status = hpet_readl(HPET_STATUS);
	pr_info("CFG: 0x%x, STATUS: 0x%x\n", cfg, status);

	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER + 4);
	pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	for (i = 0; i < channels; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i) + 4);
		pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h);

		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i) + 4);
		pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h);

		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i) + 4);
		pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h);
	}
}

#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__func__, __LINE__);		\
} while (0)

/*
 * When the HPET driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

static void __init hpet_reserve_platform_timers(void)
{
	struct hpet_data hd;
	unsigned int i;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address	= hpet_address;
	hd.hd_address		= hpet_virt_address;
	hd.hd_nirqs		= hpet_base.nr_channels;

	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ.  Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < hpet_base.nr_channels; i++)
		hd.hd_irq[i] = hpet_base.channels[i].irq;

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);
}
#else
static inline void hpet_reserve_platform_timers(void) { }
#endif

/* Common HPET functions */
static void hpet_stop_counter(void)
{
	u32 cfg = hpet_readl(HPET_CFG);

	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

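/*
 * Clocksource resume path: force_hpet_resume() lets the platform quirks
 * re-enable a force-enabled HPET (if any) before the main counter is
 * restarted from zero.
 */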
static void hpet_resume_device(void)
{
	force_hpet_resume();
}

static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}

static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = true;
}

static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * Start HPET with the boot CPU's cpumask and make it global after
	 * the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(boot_cpu_data.cpu_index);
	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
	global_clock_event = &hpet_clockevent;
	pr_debug("Clockevent registered\n");
}

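/*
 * The shared clock event callbacks. The "timer" argument selects the HPET
 * comparator channel: the legacy wrappers below always use channel 0, the
 * MSI wrappers use the channel owned by the respective hpet_dev.
 */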
static int hpet_set_periodic(struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	hpet_stop_counter();
	delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
	delta >>= evt->shift;
	now = hpet_readl(HPET_COUNTER);
	cmp = now + (unsigned int)delta;
	cfg = hpet_readl(HPET_Tn_CFG(timer));
	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
	       HPET_TN_32BIT;
	hpet_writel(cfg, HPET_Tn_CFG(timer));
	hpet_writel(cmp, HPET_Tn_CMP(timer));
	udelay(1);
	/*
	 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
	 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
	 * bit is automatically cleared after the first write.
	 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
	 * Publication # 24674)
	 */
	hpet_writel((unsigned int)delta, HPET_Tn_CMP(timer));
	hpet_start_counter();
	hpet_print_config();

	return 0;
}

static int hpet_set_oneshot(struct clock_event_device *evt, int timer)
{
	unsigned int cfg;

	cfg = hpet_readl(HPET_Tn_CFG(timer));
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_Tn_CFG(timer));

	return 0;
}

static int hpet_shutdown(struct clock_event_device *evt, int timer)
{
	unsigned int cfg;

	cfg = hpet_readl(HPET_Tn_CFG(timer));
	cfg &= ~HPET_TN_ENABLE;
	hpet_writel(cfg, HPET_Tn_CFG(timer));

	return 0;
}

static int hpet_resume(struct clock_event_device *evt)
{
	hpet_enable_legacy_int();
	hpet_print_config();

	return 0;
}

static int hpet_next_event(unsigned long delta, int channel)
{
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(channel));

	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on a equal comparison and neither provides a less
	 * than or equal functionality (which would require to take
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but a NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}

static int hpet_legacy_shutdown(struct clock_event_device *evt)
{
	return hpet_shutdown(evt, 0);
}

static int hpet_legacy_set_oneshot(struct clock_event_device *evt)
{
	return hpet_set_oneshot(evt, 0);
}

static int hpet_legacy_set_periodic(struct clock_event_device *evt)
{
	return hpet_set_periodic(evt, 0);
}

static int hpet_legacy_resume(struct clock_event_device *evt)
{
	return hpet_resume(evt);
}

static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	return hpet_next_event(delta, 0);
}

/*
 * The HPET clock event device
 */
static struct clock_event_device hpet_clockevent = {
	.name			= "hpet",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_periodic	= hpet_legacy_set_periodic,
	.set_state_oneshot	= hpet_legacy_set_oneshot,
	.set_state_shutdown	= hpet_legacy_shutdown,
	.tick_resume		= hpet_legacy_resume,
	.set_next_event		= hpet_legacy_next_event,
	.irq			= 0,
	.rating			= 50,
};

/*
 * HPET MSI Support
 */
#ifdef CONFIG_PCI_MSI

void hpet_msi_unmask(struct irq_data *data)
{
	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
	unsigned int cfg;

	/* unmask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_mask(struct irq_data *data)
{
	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
	unsigned int cfg;

	/* mask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

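/*
 * MSI based per-channel clock event callbacks. Each per-CPU channel is
 * represented by a struct hpet_dev which embeds its own clock_event_device;
 * clockevent_to_channel() recovers the hpet_dev from the evt pointer
 * handed in by the clockevents core.
 */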
static int hpet_msi_shutdown(struct clock_event_device *evt)
{
	return hpet_shutdown(evt, clockevent_to_channel(evt)->num);
}

static int hpet_msi_set_oneshot(struct clock_event_device *evt)
{
	return hpet_set_oneshot(evt, clockevent_to_channel(evt)->num);
}

static int hpet_msi_set_periodic(struct clock_event_device *evt)
{
	return hpet_set_periodic(evt, clockevent_to_channel(evt)->num);
}

static int hpet_msi_resume(struct clock_event_device *evt)
{
	struct hpet_dev *hdev = clockevent_to_channel(evt);
	struct irq_data *data = irq_get_irq_data(hdev->irq);
	struct msi_msg msg;

	/* Restore the MSI msg and unmask the interrupt */
	irq_chip_compose_msi_msg(data, &msg);
	hpet_msi_write(hdev, &msg);
	hpet_msi_unmask(data);

	return 0;
}

static int hpet_msi_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	return hpet_next_event(delta, clockevent_to_channel(evt)->num);
}

static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
	struct hpet_dev *dev = data;
	struct clock_event_device *evt = &dev->evt;

	if (!evt->event_handler) {
		pr_info("Spurious interrupt HPET timer %d\n", dev->num);
		return IRQ_HANDLED;
	}

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

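/*
 * Request the MSI interrupt for a per-CPU channel, pin it to the CPU which
 * owns the channel and register the per-CPU clock event device.
 */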
static int hpet_setup_irq(struct hpet_dev *dev)
{
	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	pr_debug("%s irq %d for MSI\n", dev->name, dev->irq);

	return 0;
}

static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;

	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP) {
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;
		evt->set_state_periodic = hpet_msi_set_periodic;
	}

	evt->set_state_shutdown = hpet_msi_shutdown;
	evt->set_state_oneshot = hpet_msi_set_oneshot;
	evt->tick_resume = hpet_msi_resume;
	evt->set_next_event = hpet_msi_next_event;
	evt->cpumask = cpumask_of(hdev->cpu);

	clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
					0x7FFFFFFF);
}

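/*
 * CPU hotplug support: an onlining CPU claims an unused, MSI capable HPET
 * channel and sets it up as its per-CPU clock event device; when the CPU
 * goes offline the channel and its interrupt are released again.
 */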
static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_base.nr_channels; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
			(unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}

static int hpet_cpuhp_online(unsigned int cpu)
{
	struct hpet_dev *hdev = hpet_get_unused_timer();

	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);
	return 0;
}

static int hpet_cpuhp_dead(unsigned int cpu)
{
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	if (!hdev)
		return 0;
	free_irq(hdev->irq, hdev);
	hdev->flags &= ~HPET_DEV_USED;
	per_cpu(cpu_hpet_dev, cpu) = NULL;
	return 0;
}

#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif

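/*
 * Scan the HPET channels for FSB/MSI capable ones and allocate an
 * interrupt from the HPET irq domain for each of them, up to the number
 * of possible CPUs. RESERVE_TIMERS channels are left for /dev/hpet.
 */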
static void __init hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i, irq;

	if (hpet_msi_disable)
		return;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return;

	num_timers = hpet_base.nr_channels;
	hpet_print_config();

	hpet_domain = hpet_create_irq_domain(hpet_blockid);
	if (!hpet_domain)
		return;

	hpet_devs = kcalloc(num_timers, sizeof(struct hpet_dev), GFP_KERNEL);
	if (!hpet_devs)
		return;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned int cfg = hpet_base.channels[i].boot_cfg;

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		sprintf(hdev->name, "hpet%d", i);
		hdev->num = i;

		irq = hpet_assign_irq(hpet_domain, hdev, hdev->num);
		if (irq <= 0)
			continue;

		hdev->irq = irq;
		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		if (num_timers_used == num_possible_cpus())
			break;
	}

	pr_info("%d channels of %d reserved for per-cpu timers\n",
		num_timers, num_timers_used);
}

#ifdef CONFIG_HPET
static void __init hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_base.nr_channels; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
#endif

#else

static inline void hpet_msi_capability_lookup(unsigned int start_timer) { }

#ifdef CONFIG_HPET
static inline void hpet_reserve_msi_timers(struct hpet_data *hd) { }
#endif

#define hpet_cpuhp_online	NULL
#define hpet_cpuhp_dead		NULL

#endif

/*
 * Clock source related code
 */
#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
/*
 * Reading the HPET counter is a very slow operation. If a large number of
 * CPUs are trying to access the HPET counter simultaneously, it can cause
 * massive delays and slow down system performance dramatically. This may
 * happen when HPET is the default clock source instead of TSC. For a
 * really large system with hundreds of CPUs, the slowdown may be so
 * severe, that it can actually crash the system because of a NMI watchdog
 * soft lockup, for example.
 *
 * If multiple CPUs are trying to access the HPET counter at the same time,
 * we don't actually need to read the counter multiple times. Instead, the
 * other CPUs can use the counter value read by the first CPU in the group.
 *
 * This special feature is only enabled on x86-64 systems. It is unlikely
 * that 32-bit x86 systems will have enough CPUs to require this feature
 * with its associated locking overhead. We also need 64-bit atomic read.
 *
 * The lock and the HPET value are stored together and can be read in a
 * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t
 * is 32 bits in size.
 */
union hpet_lock {
	struct {
		arch_spinlock_t lock;
		u32 value;
	};
	u64 lockval;
};

static union hpet_lock hpet __cacheline_aligned = {
	{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
};

static u64 read_hpet(struct clocksource *cs)
{
	unsigned long flags;
	union hpet_lock old, new;

	BUILD_BUG_ON(sizeof(union hpet_lock) != 8);

	/*
	 * Read HPET directly if in NMI.
	 */
	if (in_nmi())
		return (u64)hpet_readl(HPET_COUNTER);

	/*
	 * Read the current state of the lock and HPET value atomically.
	 */
	old.lockval = READ_ONCE(hpet.lockval);

	if (arch_spin_is_locked(&old.lock))
		goto contended;

	local_irq_save(flags);
	if (arch_spin_trylock(&hpet.lock)) {
		new.value = hpet_readl(HPET_COUNTER);
		/*
		 * Use WRITE_ONCE() to prevent store tearing.
		 */
		WRITE_ONCE(hpet.value, new.value);
		arch_spin_unlock(&hpet.lock);
		local_irq_restore(flags);
		return (u64)new.value;
	}
	local_irq_restore(flags);

contended:
	/*
	 * Contended case
	 * --------------
	 * Wait until the HPET value change or the lock is free to indicate
	 * its value is up-to-date.
	 *
	 * It is possible that old.value has already contained the latest
	 * HPET value while the lock holder was in the process of releasing
	 * the lock. Checking for lock state change will enable us to return
	 * the value immediately instead of waiting for the next HPET reader
	 * to come along.
	 */
	do {
		cpu_relax();
		new.lockval = READ_ONCE(hpet.lockval);
	} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));

	return (u64)new.value;
}
#else
/*
 * For UP or 32-bit.
 */
static u64 read_hpet(struct clocksource *cs)
{
	return (u64)hpet_readl(HPET_COUNTER);
}
#endif

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
};

/*
 * AMD SB700 based systems with spread spectrum enabled use a SMM based
 * HPET emulation to provide proper frequency setting.
 *
 * On such systems the SMM code is initialized with the first HPET register
 * access and takes some time to complete. During this time the config
 * register reads 0xffffffff. We check for max 1000 loops whether the
 * config register reads a non-0xffffffff value to make sure that the
 * HPET is up and running before we proceed any further.
 *
 * A counting loop is safe, as the HPET access takes thousands of CPU cycles.
 *
 * On non-SB700 based machines this check is only done once and has no
 * side effects.
 */
static bool __init hpet_cfg_working(void)
{
	int i;

	for (i = 0; i < 1000; i++) {
		if (hpet_readl(HPET_CFG) != 0xFFFFFFFF)
			return true;
	}

	pr_warn("Config register invalid. Disabling HPET\n");
	return false;
}

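/*
 * Check whether the HPET counter is actually advancing. The TSC serves as
 * a crude timeout reference because its frequency is not known yet; see
 * the comment in the wait loop below.
 */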
static bool __init hpet_counting(void)
{
	u64 start, now, t1;

	hpet_restart_counter();

	t1 = hpet_readl(HPET_COUNTER);
	start = rdtsc();

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		if (t1 != hpet_readl(HPET_COUNTER))
			return true;
		now = rdtsc();
	} while ((now - start) < 200000UL);

	pr_warn("Counter not counting. HPET disabled\n");
	return false;
}

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	u32 hpet_period, cfg, id, irq;
	unsigned int i, channels;
	struct hpet_channel *hc;
	u64 freq;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();
	if (!hpet_virt_address)
		return 0;

	/* Validate that the config register is working */
	if (!hpet_cfg_working())
		goto out_nohpet;

	/* Validate that the counter is counting */
	if (!hpet_counting())
		goto out_nohpet;

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/* The period is a femtoseconds value. Convert it to a frequency. */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

	/* This is the HPET channel number which is zero based */
	channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC) && channels < 2)
		goto out_nohpet;

	hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL);
	if (!hc) {
		pr_warn("Disabling HPET.\n");
		goto out_nohpet;
	}
	hpet_base.channels = hc;
	hpet_base.nr_channels = channels;

	/* Read, store and sanitize the global configuration */
	cfg = hpet_readl(HPET_CFG);
	hpet_base.boot_cfg = cfg;
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	if (cfg)
		pr_warn("Global config: Unknown bits %#x\n", cfg);

	/* Read, store and sanitize the per channel configuration */
	for (i = 0; i < channels; i++, hc++) {
		hc->num = i;

		cfg = hpet_readl(HPET_Tn_CFG(i));
		hc->boot_cfg = cfg;
		irq = (cfg & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
		hc->irq = irq;

		cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
		hpet_writel(cfg, HPET_Tn_CFG(i));

		cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
			 | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
			 | HPET_TN_FSB | HPET_TN_FSB_CAP);
		if (cfg)
			pr_warn("Channel #%u config: Unknown bits %#x\n", i, cfg);
	}
	hpet_print_config();

	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		hpet_base.channels[0].mode = HPET_MODE_LEGACY;
		if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC))
			hpet_base.channels[1].mode = HPET_MODE_LEGACY;
		return 1;
	}
	return 0;

out_nohpet:
	kfree(hpet_base.channels);
	hpet_base.channels = NULL;
	hpet_base.nr_channels = 0;
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}

/*
 * The late initialization runs after the PCI quirks have been invoked
 * which might have detected a system on which the HPET can be enforced.
 */
static __init int hpet_late_init(void)
{
	int ret;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers();
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
				hpet_cpuhp_online, NULL);
	if (ret)
		return ret;
	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
				hpet_cpuhp_dead);
	if (ret)
		goto err_cpuhp;
	return 0;

err_cpuhp:
	cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
	return ret;
}
fs_initcall(hpet_late_init);

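/*
 * hpet_disable() puts the HPET back into the state handed over by the
 * firmware: the saved global and per-channel boot configuration is
 * restored and the main counter is only re-enabled if it was enabled
 * at boot time.
 */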
void hpet_disable(void)
{
	unsigned int i;
	u32 cfg;

	if (!is_hpet_capable() || !hpet_virt_address)
		return;

	/* Restore boot configuration with the enable bit cleared */
	cfg = hpet_base.boot_cfg;
	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);

	/* Restore the channel boot configuration */
	for (i = 0; i < hpet_base.nr_channels; i++)
		hpet_writel(hpet_base.channels[i].boot_cfg, HPET_Tn_CFG(i));

	/* If the HPET was enabled at boot time, reenable it */
	if (hpet_base.boot_cfg & HPET_CFG_ENABLE)
		hpet_writel(hpet_base.boot_cfg, HPET_CFG);
}

# ifdef CONFIG_HPET_EMULATE_RTC
2019-06-23 15:23:56 +02:00
/*
* HPET in LegacyReplacement mode eats up the RTC interrupt line . When HPET
2007-02-16 01:28:04 -08:00
* is enabled , we support RTC interrupt functionality in software .
2019-06-23 15:23:56 +02:00
*
2007-02-16 01:28:04 -08:00
* RTC has 3 kinds of interrupts :
2019-06-23 15:23:56 +02:00
*
* 1 ) Update Interrupt - generate an interrupt , every second , when the
* RTC clock is updated
* 2 ) Alarm Interrupt - generate an interrupt at a specific time of day
* 3 ) Periodic Interrupt - generate periodic interrupt , with frequencies
* 2 Hz - 8192 Hz ( 2 Hz - 64 Hz for non - root user ) ( all frequencies in powers of 2 )
*
* ( 1 ) and ( 2 ) above are implemented using polling at a frequency of 64 Hz :
* DEFAULT_RTC_INT_FREQ .
*
* The exact frequency is a tradeoff between accuracy and interrupt overhead .
*
* For ( 3 ) , we use interrupts at 64 Hz , or the user specified periodic frequency ,
* if it ' s higher .
2007-02-16 01:28:04 -08:00
*/
# include <linux/mc146818rtc.h>
# include <linux/rtc.h>
# define DEFAULT_RTC_INT_FREQ 64
# define DEFAULT_RTC_SHIFT 6
# define RTC_NUM_INTS 1
static unsigned long hpet_rtc_flags ;
2008-07-23 21:30:47 -07:00
static int hpet_prev_update_sec ;
2007-02-16 01:28:04 -08:00
static struct rtc_time hpet_alarm_time ;
static unsigned long hpet_pie_count ;
2009-02-04 13:40:31 +03:00
static u32 hpet_t1_cmp ;
2009-08-19 08:44:24 +01:00
static u32 hpet_default_delta ;
static u32 hpet_pie_delta ;
2007-02-16 01:28:04 -08:00
static unsigned long hpet_pie_limit ;
2008-01-30 13:33:28 +01:00
static rtc_irq_handler irq_handler ;
2009-02-04 13:40:31 +03:00
/*
2019-06-23 15:23:56 +02:00
* Check that the HPET counter c1 is ahead of c2
2009-02-04 13:40:31 +03:00
*/
static inline int hpet_cnt_ahead ( u32 c1 , u32 c2 )
{
return ( s32 ) ( c2 - c1 ) < 0 ;
}
2008-01-30 13:33:28 +01:00
/*
 * Registers a IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);

/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

static void hpet_disable_rtc_channel(void)
{
	u32 cfg = hpet_readl(HPET_T1_CFG);

	cfg &= ~HPET_TN_ENABLE;
	hpet_writel(cfg, HPET_T1_CFG);
}

/*
 * The functions below are called from rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags &= ~bit_mask;
	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);

int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_alarm_time.tm_hour = hrs;
	hpet_alarm_time.tm_min = min;
	hpet_alarm_time.tm_sec = sec;

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);

int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ) {
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	} else {
		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= hpet_clockevent.shift;
		hpet_pie_delta = clc;
		hpet_pie_limit = 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);

int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);

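/*
 * Re-arm the emulation channel for the next tick. If the comparator had to
 * be pushed forward more than once to get ahead of the counter again, the
 * skipped periods are accounted as lost RTC interrupts.
 */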
static void hpet_rtc_timer_reinit(void)
{
	unsigned int delta;
	int lost_ints = -1;

	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			pr_warn("Lost %d RTC interrupts\n", lost_ints);
	}
}

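/*
 * The emulated RTC interrupt: polls the CMOS clock for update and alarm
 * events, raises the periodic flag based on hpet_pie_count and hands the
 * accumulated flags to the registered rtc_irq_handler.
 */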
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		mc146818_get_time(&curr_time);

	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	if (hpet_rtc_flags & RTC_PIE && ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
		rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif