/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PTP 1588 clock support - private declarations for the core module.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#ifndef _PTP_PRIVATE_H_
#define _PTP_PRIVATE_H_

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/posix-clock.h>
#include <linux/ptp_clock.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>

/* Capacity of each timestamp event FIFO (must stay a value the
 * head/tail wraparound arithmetic in queue_cnt() can rely on).
 */
#define PTP_MAX_TIMESTAMPS 128
/* Number of events copied out per read burst. */
#define PTP_BUF_TIMESTAMPS 30
/* Default upper bound on virtual clocks per physical clock. */
#define PTP_DEFAULT_MAX_VCLOCKS 20
/* Size of the per-queue channel mask bitmap. */
#define PTP_MAX_CHANNELS 2048
struct timestamp_event_queue {
struct ptp_extts_event buf [ PTP_MAX_TIMESTAMPS ] ;
int head ;
int tail ;
spinlock_t lock ;
2023-10-12 01:39:54 +03:00
struct list_head qlist ;
2023-10-12 01:39:56 +03:00
unsigned long * mask ;
2023-10-12 01:39:57 +03:00
struct dentry * debugfs_instance ;
struct debugfs_u32_array dfs_bitmap ;
2011-04-22 14:03:08 +04:00
} ;
struct ptp_clock {
struct posix_clock clock ;
2019-12-27 05:26:27 +03:00
struct device dev ;
2011-04-22 14:03:08 +04:00
struct ptp_clock_info * info ;
dev_t devid ;
int index ; /* index into clocks.map */
struct pps_device * pps_source ;
2012-09-22 11:02:01 +04:00
long dialed_frequency ; /* remembers the frequency adjustment */
2023-10-12 01:39:54 +03:00
struct list_head tsevqs ; /* timestamp fifo list */
2023-11-07 11:00:41 +03:00
spinlock_t tsevqs_lock ; /* protects tsevqs from concurrent access */
2014-03-21 01:21:52 +04:00
struct mutex pincfg_mux ; /* protect concurrent info->pin_config access */
2011-04-22 14:03:08 +04:00
wait_queue_head_t tsev_wq ;
int defunct ; /* tells readers to go away when clock is being removed */
2014-03-21 01:21:54 +04:00
struct device_attribute * pin_dev_attr ;
struct attribute * * pin_attr ;
struct attribute_group pin_attr_group ;
2017-02-14 21:23:34 +03:00
/* 1st entry is a pointer to the real group, 2nd is NULL terminator */
const struct attribute_group * pin_attr_groups [ 2 ] ;
ptp: introduce ptp auxiliary worker
Many PTP drivers required to perform some asynchronous or periodic work,
like periodically handling PHC counter overflow or handle delayed timestamp
for RX/TX network packets. In most of the cases, such work is implemented
using workqueues. Unfortunately, Kernel workqueues might introduce
significant delay in work scheduling under high system load and on -RT,
which could cause misbehavior of PTP drivers due to internal counter
overflow, for example, and there is no way to tune its execution policy and
priority manuallly.
Hence, The kthread_worker can be used insted of workqueues, as it create
separte named kthread for each worker and its its execution policy and
priority can be configured using chrt tool.
This prblem was reported for two drivers TI CPSW CPTS and dp83640, so
instead of modifying each of these driver it was proposed to add PTP
auxiliary worker to the PHC subsystem.
The patch adds PTP auxiliary worker in PHC subsystem using kthread_worker
and kthread_delayed_work and introduces two new PHC subsystem APIs:
- long (*do_aux_work)(struct ptp_clock_info *ptp) callback in
ptp_clock_info structure, which driver should assign if it require to
perform asynchronous or periodic work. Driver should return the delay of
the PTP next auxiliary work scheduling time (>=0) or negative value in case
further scheduling is not required.
- int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) which
allows schedule PTP auxiliary work.
The name of kthread_worker thread corresponds PTP PHC device name "ptp%d".
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-07-29 01:30:02 +03:00
struct kthread_worker * kworker ;
struct kthread_delayed_work aux_work ;
2021-06-30 11:11:53 +03:00
unsigned int max_vclocks ;
unsigned int n_vclocks ;
2021-06-30 11:11:54 +03:00
int * vclock_index ;
2021-06-30 11:11:53 +03:00
struct mutex n_vclocks_mux ; /* protect concurrent n_vclocks access */
bool is_virtual_clock ;
2022-05-06 23:01:37 +03:00
bool has_cycles ;
2023-10-12 01:39:57 +03:00
struct dentry * debugfs_root ;
2011-04-22 14:03:08 +04:00
} ;
2021-06-30 11:11:52 +03:00
# define info_to_vclock(d) container_of((d), struct ptp_vclock, info)
# define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc)
# define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work)
struct ptp_vclock {
struct ptp_clock * pclock ;
struct ptp_clock_info info ;
struct ptp_clock * clock ;
2022-05-06 23:01:41 +03:00
struct hlist_node vclock_hash_node ;
2021-06-30 11:11:52 +03:00
struct cyclecounter cc ;
struct timecounter tc ;
2023-02-21 16:06:16 +03:00
struct mutex lock ; /* protects tc/cc */
2021-06-30 11:11:52 +03:00
} ;
2011-04-22 14:03:08 +04:00
/*
* The function queue_cnt ( ) is safe for readers to call without
* holding q - > lock . Readers use this function to verify that the queue
* is nonempty before proceeding with a dequeue operation . The fact
* that a writer might concurrently increment the tail does not
* matter , since the queue remains nonempty nonetheless .
*/
2023-11-09 20:48:59 +03:00
static inline int queue_cnt ( const struct timestamp_event_queue * q )
2011-04-22 14:03:08 +04:00
{
2023-11-09 20:48:59 +03:00
/*
* Paired with WRITE_ONCE ( ) in enqueue_external_timestamp ( ) ,
* ptp_read ( ) , extts_fifo_show ( ) .
*/
int cnt = READ_ONCE ( q - > tail ) - READ_ONCE ( q - > head ) ;
2011-04-22 14:03:08 +04:00
return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt ;
}
2021-06-30 11:11:53 +03:00
/* Check if ptp virtual clock is in use */
static inline bool ptp_vclock_in_use ( struct ptp_clock * ptp )
{
bool in_use = false ;
if ( mutex_lock_interruptible ( & ptp - > n_vclocks_mux ) )
return true ;
if ( ! ptp - > is_virtual_clock & & ptp - > n_vclocks )
in_use = true ;
mutex_unlock ( & ptp - > n_vclocks_mux ) ;
return in_use ;
}
2022-05-06 23:01:37 +03:00
/* Check if ptp clock shall be free running */
static inline bool ptp_clock_freerun ( struct ptp_clock * ptp )
{
if ( ptp - > has_cycles )
return false ;
return ptp_vclock_in_use ( ptp ) ;
}
2021-06-30 11:11:55 +03:00
extern struct class * ptp_class ;
2011-04-22 14:03:08 +04:00
/*
* see ptp_chardev . c
*/
2014-03-21 01:21:52 +04:00
/* caller must hold pincfg_mux */
int ptp_set_pinfunc ( struct ptp_clock * ptp , unsigned int pin ,
enum ptp_pin_function func , unsigned int chan ) ;
2023-10-12 01:39:53 +03:00
long ptp_ioctl ( struct posix_clock_context * pccontext , unsigned int cmd ,
unsigned long arg ) ;
2011-04-22 14:03:08 +04:00
2023-10-12 01:39:53 +03:00
int ptp_open ( struct posix_clock_context * pccontext , fmode_t fmode ) ;
2011-04-22 14:03:08 +04:00
2023-10-12 01:39:53 +03:00
int ptp_release ( struct posix_clock_context * pccontext ) ;
2011-04-22 14:03:08 +04:00
2023-10-12 01:39:53 +03:00
ssize_t ptp_read ( struct posix_clock_context * pccontext , uint flags , char __user * buf ,
size_t cnt ) ;
__poll_t ptp_poll ( struct posix_clock_context * pccontext , struct file * fp ,
poll_table * wait ) ;
2011-04-22 14:03:08 +04:00
/*
* see ptp_sysfs . c
*/
2013-07-25 02:05:20 +04:00
extern const struct attribute_group * ptp_groups [ ] ;
2011-04-22 14:03:08 +04:00
2017-02-14 21:23:34 +03:00
int ptp_populate_pin_groups ( struct ptp_clock * ptp ) ;
void ptp_cleanup_pin_groups ( struct ptp_clock * ptp ) ;
2011-04-22 14:03:08 +04:00
2021-06-30 11:11:52 +03:00
struct ptp_vclock * ptp_vclock_register ( struct ptp_clock * pclock ) ;
void ptp_vclock_unregister ( struct ptp_vclock * vclock ) ;
2011-04-22 14:03:08 +04:00
# endif