// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/pci/pci_general.h"

#include <linux/pci.h>
#include <linux/bitfield.h>
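
/*
 * Simulation/emulation platforms (hdev->pldm) are much slower than real
 * silicon, so the ELBI polling timeout is extended by a factor of 10 there.
 */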
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC	(HL_PCI_ELBI_TIMEOUT_MSEC * 10)

#define IATU_REGION_CTRL_REGION_EN_MASK		BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK	BIT(30)
#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK	BIT(19)
#define IATU_REGION_CTRL_BAR_NUM_MASK		GENMASK(10, 8)

/**
 * hl_pci_bars_map() - Map PCI BARs.
 * @hdev: Pointer to hl_device structure.
 * @name: Array of BAR names.
 * @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
 *
 * Request PCI regions and map them to kernel virtual addresses.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
			bool is_wc[3])
{
	struct pci_dev *pdev = hdev->pdev;
	int rc, i, bar;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	for (i = 0; i < 3; i++) {
		bar = i * 2; /* 64-bit BARs */
		hdev->pcie_bar[bar] = is_wc[i] ?
				pci_ioremap_wc_bar(pdev, bar) :
				pci_ioremap_bar(pdev, bar);
		if (!hdev->pcie_bar[bar]) {
			dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
					is_wc[i] ? "_wc" : "", name[i]);
			rc = -ENODEV;
			goto err;
		}
	}

	return 0;

err:
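	/* Unmap only the BARs that were mapped before the failure */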
	for (i = 2; i >= 0; i--) {
		bar = i * 2; /* 64-bit BARs */
		if (hdev->pcie_bar[bar])
			iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);

	return rc;
}

/**
 * hl_pci_bars_unmap() - Unmap PCI BARs.
 * @hdev: Pointer to hl_device structure.
 *
 * Release all PCI BARs and unmap their virtual addresses.
 */
static void hl_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int i, bar;

	for (i = 2; i >= 0; i--) {
		bar = i * 2; /* 64-bit BARs */
		iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);
}

/**
 * hl_pci_elbi_write() - Write through the ELBI interface.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to
 * @data: Data to write
 *
 * Return: 0 on success, negative value for failure.
 */
static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u64 msec;
	u32 val;

	if (hdev->pldm)
		msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
	else
		msec = HL_PCI_ELBI_TIMEOUT_MSEC;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	timeout = ktime_add_ms(ktime_get(), msec);
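
	/* Poll the status register until the write completes or we time out */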
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}

		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	if (val & PCI_CONFIG_ELBI_STS_ERR) {
		dev_err(hdev->dev, "Error writing to ELBI\n");
		return -EIO;
	}

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}

/**
 * hl_pci_iatu_write() - iatu write routine.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to
 * @data: Data to write
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;
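
	/*
	 * Move the DBI window via the PCIe AUX register so that the requested
	 * register can be written through the DBI base address.
	 */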
	rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
	rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
				data);

	if (rc)
		return -EIO;

	return 0;
}

/**
 * hl_pci_reset_link_through_bridge() - Reset PCI link.
 * @hdev: Pointer to hl_device structure.
 */
static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct pci_dev *parent_port;
	u16 val;

	parent_port = pdev->bus->self;
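
	/* Toggle the secondary bus reset bit in the parent bridge */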
	pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
	val |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(1);

	val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(3);
}

/**
 * hl_pci_set_inbound_region() - Configure inbound region
 * @hdev: Pointer to hl_device structure.
 * @region: Inbound region number.
 * @pci_region: Inbound region parameters.
 *
 * Configure the iATU inbound region.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
		struct hl_inbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 bar_phys_base, region_base, region_end_address;
	u32 offset, ctrl_reg_val;
	int rc = 0;

	/* region offset */
	offset = (0x200 * region) + 0x100;
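
	/*
	 * In address-match mode, program the region's base and end addresses,
	 * derived from the BAR's physical base plus the offset inside it.
	 */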
	if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
		bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
		region_base = bar_phys_base + pci_region->offset_in_bar;
		region_end_address = region_base + pci_region->size - 1;

		rc |= hl_pci_iatu_write(hdev, offset + 0x8,
				lower_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0xC,
				upper_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0x10,
				lower_32_bits(region_end_address));
	}
	/* Point to the specified address */
	rc |= hl_pci_iatu_write(hdev, offset + 0x14,
			lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x18,
			upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);

	/* Enable + bar/address match + match enable + bar number */
	ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
			pci_region->mode);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);

	if (pci_region->mode == PCI_BAR_MATCH_MODE)
		ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
				pci_region->bar);

	rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);

	/* Return the DBI window to the default location */
	rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
	rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);

	if (rc)
		dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
				pci_region->bar, pci_region->addr);

	return rc;
}
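
/*
 * Illustrative usage sketch (not taken from a specific ASIC file; the
 * sram_bar_id and sram_base_addr values below are hypothetical): an ASIC's
 * init_iatu callback would typically fill a struct hl_inbound_pci_region
 * and hand it to this function, e.g.:
 *
 *	struct hl_inbound_pci_region pci_region;
 *
 *	pci_region.mode = PCI_BAR_MATCH_MODE;
 *	pci_region.bar = sram_bar_id;
 *	pci_region.addr = sram_base_addr;
 *	rc = hl_pci_set_inbound_region(hdev, 0, &pci_region);
 */
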
/**
 * hl_pci_set_outbound_region() - Configure outbound region 0
 * @hdev: Pointer to hl_device structure.
 * @pci_region: Outbound region parameters.
 *
 * Configure the iATU outbound region 0.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_outbound_region(struct hl_device *hdev,
		struct hl_outbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 outbound_region_end_address;
	int rc = 0;

	/* Outbound Region 0 */
	outbound_region_end_address =
			pci_region->addr + pci_region->size - 1;
	rc |= hl_pci_iatu_write(hdev, 0x008,
				lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x00C,
				upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x010,
				lower_32_bits(outbound_region_end_address));
	rc |= hl_pci_iatu_write(hdev, 0x014, 0);
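
	/*
	 * On POWER9 hosts with a 64-bit DMA mask, set bit 59 of the address
	 * (0x08000000 in this upper dword); otherwise leave it as zero.
	 */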
	if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64))
		rc |= hl_pci_iatu_write(hdev, 0x018, 0x08000000);
	else
		rc |= hl_pci_iatu_write(hdev, 0x018, 0);

	rc |= hl_pci_iatu_write(hdev, 0x020,
				upper_32_bits(outbound_region_end_address));

	/* Increase region size */
	rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location */
	rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
	rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);

	return rc;
}

/**
 * hl_pci_set_dma_mask() - Set DMA masks for the device.
 * @hdev: Pointer to hl_device structure.
 *
 * This function sets the DMA masks (regular and consistent) to the value
 * stored in hdev->dma_mask and fails if either mask cannot be set.
 *
 * Return: 0 on success, non-zero for failure.
 */
static int hl_pci_set_dma_mask(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	/* set DMA mask */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
	if (rc) {
		dev_err(hdev->dev,
			"Failed to set pci dma mask to %d bits, error %d\n",
			hdev->dma_mask, rc);
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
	if (rc) {
		dev_err(hdev->dev,
			"Failed to set pci consistent dma mask to %d bits, error %d\n",
			hdev->dma_mask, rc);
		return rc;
	}

	return 0;
}

/**
 * hl_pci_init() - PCI initialization code.
 * @hdev: Pointer to hl_device structure.
 *
 * Set DMA masks, initialize the PCI controller and map the PCI BARs.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_init(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	if (hdev->reset_pcilink)
		hl_pci_reset_link_through_bridge(hdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);
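
	/* BAR mapping and iATU setup go through the ASIC-specific callbacks */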
	rc = hdev->asic_funcs->pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
		goto disable_device;
	}

	rc = hdev->asic_funcs->init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize iATU\n");
		goto disable_device;
	}

	rc = hl_pci_set_dma_mask(hdev);
	if (rc)
		goto disable_device;

	return 0;

disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}

/**
 * hl_pci_fini() - PCI finalization code.
 * @hdev: Pointer to hl_device structure.
 *
 * Unmap PCI bars and disable PCI device.
 */
void hl_pci_fini(struct hl_device *hdev)
{
	hl_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);
}