# ifndef _PIO_H
# define _PIO_H
/*
 * Copyright (c) 2015 - 2017 Intel Corporation.
 *
* This file is provided under a dual BSD / GPLv2 license . When using or
* redistributing this file , you may do so under either license .
*
* GPL LICENSE SUMMARY
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation .
*
* This program is distributed in the hope that it will be useful , but
* WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
* General Public License for more details .
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions
* are met :
*
* - Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
* - Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in
* the documentation and / or other materials provided with the
* distribution .
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
* LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL ,
* SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT
* LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
* DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
* ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*
*/
/*
 * Send context types.  Used as indexes into the per-type configuration
 * arrays (see struct sc_config_sizes and init_sc_pools_and_sizes()).
 */
#define SC_KERNEL 0
#define SC_VL15   1
#define SC_ACK    2
#define SC_USER   3	/* must be the last one: it may take all left */
#define SC_MAX    4	/* count of send context types */
2015-07-30 22:17:43 +03:00
/* invalid send context index */
# define INVALID_SCI 0xff
/* PIO buffer release callback function */
typedef void ( * pio_release_cb ) ( void * arg , int code ) ;
/* PIO release codes - in bits, as there could more than one that apply */
# define PRC_OK 0 /* no known error */
# define PRC_STATUS_ERR 0x01 /* credit return due to status error */
# define PRC_PBC 0x02 /* credit return due to PBC */
# define PRC_THRESHOLD 0x04 /* credit return due to threshold */
# define PRC_FILL_ERR 0x08 /* credit return due fill error */
# define PRC_FORCE 0x10 /* credit return due credit force */
# define PRC_SC_DISABLE 0x20 /* clean-up after a context disable */
/*
 * Byte helper: lets the PIO copy code view the same 8 bytes as one
 * qword, two dwords, or eight individual bytes (used for the partial
 * "carry" bytes in struct pio_buf below).
 */
union mix {
u64 val64 ; /* all 8 bytes as a single qword */
u32 val32 [ 2 ] ; /* as two 32-bit words */
u8 val8 [ 8 ] ; /* as individual bytes */
} ;
/*
 * An allocated PIO buffer.
 *
 * Handed out by sc_buffer_alloc() and filled via the pio_copy() /
 * seg_pio_copy_*() routines.  @cb (with @arg) is invoked when the
 * buffer's credits are returned, with a PRC_* code describing why.
 */
struct pio_buf {
	struct send_context *sc;/* back pointer to owning send context */
	pio_release_cb cb;	/* called when the buffer is released */
	void *arg;		/* argument for cb */
	void __iomem *start;	/* buffer start address */
	void __iomem *end;	/* context end address */
	unsigned long sent_at;	/* buffer is sent when <= free */
	union mix carry;	/* pending unwritten bytes */
	u16 qw_written;		/* QW written so far */
	u8 carry_bytes;		/* number of valid bytes in carry */
};
/*
 * Cache line aligned pio buffer array.  Wrapping each pio_buf in a
 * ____cacheline_aligned union pads every ring entry out to a full
 * cache line, so adjacent shadow ring entries never share a line.
 */
union pio_shadow_ring {
struct pio_buf pbuf ; /* buffer state for this ring slot */
} ____cacheline_aligned ;
/* per-NUMA send context */
struct send_context {
/* read-only after init */
struct hfi1_devdata * dd ; /* device */
union pio_shadow_ring * sr ; /* shadow ring */
2016-10-25 23:12:34 +03:00
void __iomem * base_addr ; /* start of PIO memory */
u32 __percpu * buffers_allocated ; /* count of buffers allocated */
u32 size ; /* context size, in bytes */
2016-02-15 07:20:25 +03:00
2015-07-30 22:17:43 +03:00
int node ; /* context home node */
u32 sr_size ; /* size of the shadow ring */
2016-10-25 23:12:34 +03:00
u16 flags ; /* flags */
u8 type ; /* context type */
u8 sw_index ; /* software index number */
u8 hw_context ; /* hardware context number */
u8 group ; /* credit return group */
2015-07-30 22:17:43 +03:00
/* allocator fields */
spinlock_t alloc_lock ____cacheline_aligned_in_smp ;
2016-10-17 14:19:13 +03:00
u32 sr_head ; /* shadow ring head */
2015-07-30 22:17:43 +03:00
unsigned long fill ; /* official alloc count */
unsigned long alloc_free ; /* copy of free (less cache thrash) */
2016-10-25 23:12:28 +03:00
u32 fill_wrap ; /* tracks fill within ring */
2016-10-25 23:12:34 +03:00
u32 credits ; /* number of blocks in context */
/* adding a new field here would make it part of this cacheline */
2015-07-30 22:17:43 +03:00
/* releaser fields */
spinlock_t release_lock ____cacheline_aligned_in_smp ;
u32 sr_tail ; /* shadow ring tail */
2016-10-17 14:19:13 +03:00
unsigned long free ; /* official free count */
volatile __le64 * hw_free ; /* HW free counter */
2015-07-30 22:17:43 +03:00
/* list for PIO waiters */
struct list_head piowait ____cacheline_aligned_in_smp ;
spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp ;
u32 credit_intr_count ; /* count of credit intr users */
2016-10-17 14:19:13 +03:00
u64 credit_ctrl ; /* cache for credit control */
2015-07-30 22:17:43 +03:00
wait_queue_head_t halt_wait ; /* wait until kernel sees interrupt */
2016-10-25 23:12:34 +03:00
struct work_struct halt_work ; /* halted context work queue entry */
2015-07-30 22:17:43 +03:00
} ;
/* send context flags (struct send_context.flags, in bits) */
#define SCF_ENABLED	0x01
#define SCF_IN_FREE	0x02
#define SCF_HALTED	0x04
#define SCF_FROZEN	0x08
#define SCF_LINK_DOWN	0x10
/*
 * Per-slot bookkeeping for send context allocation: records whether
 * the slot is in use and where the context's blocks live in the PIO
 * array (base/credits).
 */
struct send_context_info {
struct send_context * sc ; /* allocated working context */
u16 allocated ; /* has this been allocated? */
u16 type ; /* context type (SC_*) */
u16 base ; /* base in PIO array */
u16 credits ; /* size in PIO array */
} ;
/* DMA credit return, index is always (context & 0x7) */
struct credit_return {
volatile __le64 cr [ 8 ] ; /* one credit-return qword per context in a group of 8 */
} ;
/* NUMA indexed credit return array */
struct credit_return_base {
struct credit_return * va ;
2016-09-06 14:35:54 +03:00
dma_addr_t dma ;
2015-07-30 22:17:43 +03:00
} ;
/* send context configuration sizes (one per type) */
struct sc_config_sizes {
short int size ; /* size of each context of this type -- units presumably PIO blocks; confirm in init_sc_pools_and_sizes() */
short int count ; /* number of contexts of this type */
} ;
/*
* The diagram below details the relationship of the mapping structures
*
* Since the mapping now allows for non - uniform send contexts per vl , the
* number of send contexts for a vl is either the vl_scontexts [ vl ] or
* a computation based on num_kernel_send_contexts / num_vls :
*
* For example :
* nactual = vl_scontexts ? vl_scontexts [ vl ] : num_kernel_send_contexts / num_vls
*
* n = roundup to next highest power of 2 using nactual
*
 * In the case where num_kernel_send_contexts / num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
*
* For the case where n > nactual , the send contexts are assigned
* in a round robin fashion wrapping back to the first send context
* for a particular vl .
*
* dd - > pio_map
* | pio_map_elem [ 0 ]
* | + - - - - - - - - - - - - - - - - - - - - +
* v | mask |
* pio_vl_map | - - - - - - - - - - - - - - - - - - - - |
* + - - - - - - - - - - - - - - - - - - - - - - - - - - + | ksc [ 0 ] - > sc 1 |
* | list ( RCU ) | | - - - - - - - - - - - - - - - - - - - - |
* | - - - - - - - - - - - - - - - - - - - - - - - - - - | - > | ksc [ 1 ] - > sc 2 |
* | mask | - - / | - - - - - - - - - - - - - - - - - - - - |
* | - - - - - - - - - - - - - - - - - - - - - - - - - - | - / | * |
* | actual_vls ( max 8 ) | - / | - - - - - - - - - - - - - - - - - - - - |
 * | - - - - - - - - - - - - - - - - - - - - - - - - - - | - - / | ksc [ n - 1 ] - > sc n |
 * | vls ( max 8 ) | - / + - - - - - - - - - - - - - - - - - - - - +
* | - - - - - - - - - - - - - - - - - - - - - - - - - - | - - /
* | map [ 0 ] | - /
* | - - - - - - - - - - - - - - - - - - - - - - - - - - | + - - - - - - - - - - - - - - - - - - - - +
* | map [ 1 ] | - - - | mask |
* | - - - - - - - - - - - - - - - - - - - - - - - - - - | \ - - - - | - - - - - - - - - - - - - - - - - - - - |
* | * | \ - - | ksc [ 0 ] - > sc 1 + n |
* | * | \ - - - - | - - - - - - - - - - - - - - - - - - - - |
* | * | \ - > | ksc [ 1 ] - > sc 2 + n |
* | - - - - - - - - - - - - - - - - - - - - - - - - - - | | - - - - - - - - - - - - - - - - - - - - |
* | map [ vls - 1 ] | - | * |
* + - - - - - - - - - - - - - - - - - - - - - - - - - - + \ - | - - - - - - - - - - - - - - - - - - - - |
 * \ - | ksc [ m - 1 ] - > sc m + n |
* \ + - - - - - - - - - - - - - - - - - - - - +
* \ -
* \
 * \ - + - - - - - - - - - - - - - - - - - - - - - - +
* \ - | mask |
* \ | - - - - - - - - - - - - - - - - - - - - - - |
* \ - | ksc [ 0 ] - > sc 1 + m + n |
* \ - | - - - - - - - - - - - - - - - - - - - - - - |
* > | ksc [ 1 ] - > sc 2 + m + n |
* | - - - - - - - - - - - - - - - - - - - - - - |
* | * |
* | - - - - - - - - - - - - - - - - - - - - - - |
* | ksc [ o - 1 ] - > sc o + m + n |
* + - - - - - - - - - - - - - - - - - - - - - - +
 *
*/
/* Initial number of send contexts per VL */
# define INIT_SC_PER_VL 2
/*
 * struct pio_map_elem - mapping for a vl
 * @mask - selector mask
 * @ksc - array of kernel send contexts for this vl
 *
 * The mask is used to "mod" the selector to
 * produce index into the trailing array of
 * kscs
 */
struct pio_map_elem {
	u32 mask;
	/*
	 * C99 flexible array member (was the GNU zero-length array
	 * idiom "ksc[0]"); contributes no size to the struct, the
	 * trailing storage is allocated along with it.
	 */
	struct send_context *ksc[];
};
/*
 * struct pio_vl_map - mapping for a vl
 * @list - rcu head for free callback
 * @mask - vl mask to "mod" the vl to produce an index to map array
 * @actual_vls - number of vls
 * @vls - numbers of vls rounded to next power of 2
 * @map - array of pio_map_elem entries
 *
 * This is the parent mapping structure.  The trailing members of the
 * struct point to pio_map_elem entries, which in turn point to an
 * array of kscs for that vl.
 */
struct pio_vl_map {
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	/* C99 flexible array member (was the GNU "map[0]" idiom) */
	struct pio_map_elem *map[];
};
/*
 * Build the per-VL -> kernel send context mapping (see the diagram
 * above).  vl_scontexts may supply an explicit per-VL context count;
 * NULL presumably selects the default distribution -- confirm against
 * the definition.
 */
int pio_map_init ( struct hfi1_devdata * dd , u8 port , u8 num_vls ,
u8 * vl_scontexts ) ;
/* free the mapping built by pio_map_init() (pio_vl_map embeds an rcu_head) */
void free_pio_map ( struct hfi1_devdata * dd ) ;
/* pick a kernel send context for a selector within the given vl */
struct send_context * pio_select_send_context_vl ( struct hfi1_devdata * dd ,
u32 selector , u8 vl ) ;
/* pick a kernel send context for a selector given an sc5 value */
struct send_context * pio_select_send_context_sc ( struct hfi1_devdata * dd ,
u32 selector , u8 sc5 ) ;
2015-07-30 22:17:43 +03:00
/* send context functions */
int init_credit_return ( struct hfi1_devdata * dd ) ;
void free_credit_return ( struct hfi1_devdata * dd ) ;
int init_sc_pools_and_sizes ( struct hfi1_devdata * dd ) ;
int init_send_contexts ( struct hfi1_devdata * dd ) ;
int init_credit_return ( struct hfi1_devdata * dd ) ;
int init_pervl_scs ( struct hfi1_devdata * dd ) ;
struct send_context * sc_alloc ( struct hfi1_devdata * dd , int type ,
uint hdrqentsize , int numa ) ;
void sc_free ( struct send_context * sc ) ;
int sc_enable ( struct send_context * sc ) ;
void sc_disable ( struct send_context * sc ) ;
int sc_restart ( struct send_context * sc ) ;
void sc_return_credits ( struct send_context * sc ) ;
void sc_flush ( struct send_context * sc ) ;
void sc_drop ( struct send_context * sc ) ;
void sc_stop ( struct send_context * sc , int bit ) ;
struct pio_buf * sc_buffer_alloc ( struct send_context * sc , u32 dw_len ,
2016-02-15 07:21:52 +03:00
pio_release_cb cb , void * arg ) ;
2015-07-30 22:17:43 +03:00
void sc_release_update ( struct send_context * sc ) ;
void sc_return_credits ( struct send_context * sc ) ;
void sc_group_release_update ( struct hfi1_devdata * dd , u32 hw_context ) ;
void sc_add_credit_return_intr ( struct send_context * sc ) ;
void sc_del_credit_return_intr ( struct send_context * sc ) ;
void sc_set_cr_threshold ( struct send_context * sc , u32 new_threshold ) ;
2016-04-12 21:30:28 +03:00
u32 sc_percent_to_threshold ( struct send_context * sc , u32 percent ) ;
2015-07-30 22:17:43 +03:00
u32 sc_mtu_to_threshold ( struct send_context * sc , u32 mtu , u32 hdrqentsize ) ;
void hfi1_sc_wantpiobuf_intr ( struct send_context * sc , u32 needint ) ;
void sc_wait ( struct hfi1_devdata * dd ) ;
void set_pio_integrity ( struct send_context * sc ) ;
/* support functions */
void pio_reset_all ( struct hfi1_devdata * dd ) ;
void pio_freeze ( struct hfi1_devdata * dd ) ;
void pio_kernel_unfreeze ( struct hfi1_devdata * dd ) ;
2018-09-20 22:59:14 +03:00
void pio_kernel_linkup ( struct hfi1_devdata * dd ) ;
/* global PIO send control operations (op codes for pio_send_control()) */
#define PSC_GLOBAL_ENABLE	 0
#define PSC_GLOBAL_DISABLE	 1
#define PSC_GLOBAL_VLARB_ENABLE	 2
#define PSC_GLOBAL_VLARB_DISABLE 3
#define PSC_CM_RESET		 4
#define PSC_DATA_VL_ENABLE	 5
#define PSC_DATA_VL_DISABLE	 6
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl);
/* perform one of the PSC_* operations above */
void pio_send_control(struct hfi1_devdata *dd, int op);

/* PIO copy routines */
/* single-shot copy of pbc + payload into a PIO buffer */
void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
	      const void *from, size_t count);
/* segmented copy: start/mid(s)/end must be called in order on one pbuf */
void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
			const void *from, size_t nbytes);
void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
void seg_pio_copy_end(struct pio_buf *pbuf);
2018-11-28 21:14:32 +03:00
void seqfile_dump_sci ( struct seq_file * s , u32 i ,
struct send_context_info * sci ) ;
2015-07-30 22:17:43 +03:00
# endif /* _PIO_H */