2008-03-05 19:52:39 +03:00
# include <linux/delay.h>
2005-04-17 02:20:36 +04:00
# include <linux/pci.h>
# include <linux/module.h>
2006-10-18 09:47:25 +04:00
# include <linux/sched.h>
2005-04-17 02:20:36 +04:00
# include <linux/ioport.h>
2006-10-19 19:41:28 +04:00
# include <linux/wait.h>
2005-04-17 02:20:36 +04:00
2005-11-06 03:45:08 +03:00
# include "pci.h"
2005-04-17 02:20:36 +04:00
/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
static DEFINE_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions.  They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

/* Register-offset alignment checks, keyed by access width. */
#define PCI_byte_BAD	0
#define PCI_word_BAD	(pos & 1)
#define PCI_dword_BAD	(pos & 3)
/*
 * PCI_OP_READ - generate pci_bus_read_config_{byte,word,dword}().
 *
 * Each generated wrapper rejects misaligned offsets, takes pci_lock with
 * interrupts disabled, and forwards to the bus-specific low-level read.
 * @data is pre-zeroed because the accessor may leave it untouched on error.
 */
#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD)						\
		return PCIBIOS_BAD_REGISTER_NUMBER;			\
	spin_lock_irqsave(&pci_lock, flags);				\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}
/*
 * PCI_OP_WRITE - generate pci_bus_write_config_{byte,word,dword}().
 *
 * Mirror of PCI_OP_READ for the write direction: alignment check,
 * pci_lock held across the low-level write, PCIBIOS result returned.
 */
#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD)						\
		return PCIBIOS_BAD_REGISTER_NUMBER;			\
	spin_lock_irqsave(&pci_lock, flags);				\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}
/* Instantiate the byte/word/dword accessors and export them. */
PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
2005-09-27 12:21:55 +04:00
2008-12-18 20:17:16 +03:00
/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to read
 * @buf:	pointer to where to store result
 *
 * Dispatches to the access-method-specific read hook; -ENODEV when the
 * device has no VPD accessor attached.
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	struct pci_vpd *vpd = dev->vpd;

	if (!vpd || !vpd->ops)
		return -ENODEV;
	return vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);
/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to write
 * @buf:	buffer containing the data to write
 *
 * Dispatches to the access-method-specific write hook; -ENODEV when the
 * device has no VPD accessor attached.
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	struct pci_vpd *vpd = dev->vpd;

	if (!vpd || !vpd->ops)
		return -ENODEV;
	return vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
2006-10-19 19:41:28 +04:00
/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST
 * and we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
2005-09-27 12:21:55 +04:00
2006-10-19 19:41:28 +04:00
static noinline void pci_wait_ucfg ( struct pci_dev * dev )
{
DECLARE_WAITQUEUE ( wait , current ) ;
__add_wait_queue ( & pci_ucfg_wait , & wait ) ;
do {
set_current_state ( TASK_UNINTERRUPTIBLE ) ;
spin_unlock_irq ( & pci_lock ) ;
schedule ( ) ;
spin_lock_irq ( & pci_lock ) ;
} while ( dev - > block_ucfg_access ) ;
__remove_wait_queue ( & pci_ucfg_wait , & wait ) ;
2005-09-27 12:21:55 +04:00
}
/*
 * PCI_USER_READ_CONFIG - generate pci_user_read_config_{byte,word,dword}().
 *
 * Unlike the pci_bus_* wrappers these honor the per-device
 * block_ucfg_access flag: if it is set they sleep in pci_wait_ucfg()
 * until access is unblocked before touching config space.
 * @data defaults to all-ones, the traditional "no device" pattern.
 */
#define PCI_USER_READ_CONFIG(size, type)				\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = 0;							\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return PCIBIOS_BAD_REGISTER_NUMBER;			\
	spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_ucfg_access))				\
		pci_wait_ucfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return ret;							\
}
/*
 * PCI_USER_WRITE_CONFIG - generate pci_user_write_config_{byte,word,dword}().
 *
 * Write-direction counterpart of PCI_USER_READ_CONFIG; also blocks while
 * dev->block_ucfg_access is set.
 */
#define PCI_USER_WRITE_CONFIG(size, type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = -EIO;							\
	if (PCI_##size##_BAD)						\
		return PCIBIOS_BAD_REGISTER_NUMBER;			\
	spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_ucfg_access))				\
		pci_wait_ucfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	spin_unlock_irq(&pci_lock);					\
	return ret;							\
}
/* Instantiate the userspace-facing accessors for all three widths. */
PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
2008-03-05 19:52:39 +03:00
/* VPD access through PCI 2.2+ VPD capability */
# define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)
struct pci_vpd_pci22 {
struct pci_vpd base ;
2008-12-18 20:17:16 +03:00
struct mutex lock ;
u16 flag ;
2008-03-05 19:52:39 +03:00
bool busy ;
2008-12-18 20:17:16 +03:00
u8 cap ;
2008-03-05 19:52:39 +03:00
} ;
2008-12-18 20:17:16 +03:00
/*
* Wait for last operation to complete .
* This code has to spin since there is no other notification from the PCI
* hardware . Since the VPD is often implemented by serial attachment to an
* EEPROM , it may take many milliseconds to complete .
*/
2008-03-05 19:52:39 +03:00
static int pci_vpd_pci22_wait ( struct pci_dev * dev )
{
struct pci_vpd_pci22 * vpd =
container_of ( dev - > vpd , struct pci_vpd_pci22 , base ) ;
2008-12-18 20:17:16 +03:00
unsigned long timeout = jiffies + HZ / 20 + 2 ;
u16 status ;
2008-03-05 19:52:39 +03:00
int ret ;
if ( ! vpd - > busy )
return 0 ;
for ( ; ; ) {
2008-12-18 20:17:16 +03:00
ret = pci_user_read_config_word ( dev , vpd - > cap + PCI_VPD_ADDR ,
2008-03-05 19:52:39 +03:00
& status ) ;
2008-12-18 20:17:16 +03:00
if ( ret )
2008-03-05 19:52:39 +03:00
return ret ;
2008-12-18 20:17:16 +03:00
if ( ( status & PCI_VPD_ADDR_F ) = = vpd - > flag ) {
2008-03-05 19:52:39 +03:00
vpd - > busy = false ;
return 0 ;
}
2008-12-18 20:17:16 +03:00
if ( time_after ( jiffies , timeout ) )
2008-03-05 19:52:39 +03:00
return - ETIMEDOUT ;
2008-12-18 20:17:16 +03:00
if ( fatal_signal_pending ( current ) )
return - EINTR ;
if ( ! cond_resched ( ) )
udelay ( 10 ) ;
2008-03-05 19:52:39 +03:00
}
}
2008-12-18 20:17:16 +03:00
/*
 * Read @count bytes of VPD starting at @pos via the PCI 2.2 capability.
 *
 * Each iteration latches one dword: write the (dword-aligned) address,
 * wait for the F bit to be set by hardware, then read the data register.
 * Unaligned head/tail bytes are handled by skipping within the dword.
 * Returns @count on success or a negative error.
 */
static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
				  void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Drain any operation left over from a previous caller. */
	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = true;
		vpd->flag = PCI_VPD_ADDR_F;	/* read completes when F sets */
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA,
						 &val);
		if (ret < 0)
			break;

		/* Copy out only the bytes of this dword inside [pos, end). */
		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
2008-12-18 20:17:16 +03:00
/*
 * Write @count bytes of VPD starting at @pos via the PCI 2.2 capability.
 *
 * The hardware only accepts whole aligned dwords, so @pos and @count must
 * both be multiples of four.  For each dword: load the data register, then
 * write the address with F set; completion is signalled when hardware
 * clears F again.  Returns @count on success or a negative error.
 */
static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
				   const void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Drain any operation left over from a previous caller. */
	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble a little-endian dword from the caller's buffer. */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA,
						  val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = true;
		vpd->flag = 0;	/* write completes when hardware clears F */
		ret = pci_vpd_pci22_wait(dev);
		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
static void pci_vpd_pci22_release ( struct pci_dev * dev )
{
kfree ( container_of ( dev - > vpd , struct pci_vpd_pci22 , base ) ) ;
}
2008-12-18 20:17:16 +03:00
static const struct pci_vpd_ops pci_vpd_pci22_ops = {
2008-03-05 19:52:39 +03:00
. read = pci_vpd_pci22_read ,
. write = pci_vpd_pci22_write ,
. release = pci_vpd_pci22_release ,
} ;
int pci_vpd_pci22_init ( struct pci_dev * dev )
{
struct pci_vpd_pci22 * vpd ;
u8 cap ;
cap = pci_find_capability ( dev , PCI_CAP_ID_VPD ) ;
if ( ! cap )
return - ENODEV ;
vpd = kzalloc ( sizeof ( * vpd ) , GFP_ATOMIC ) ;
if ( ! vpd )
return - ENOMEM ;
2008-07-02 21:59:04 +04:00
vpd - > base . len = PCI_VPD_PCI22_SIZE ;
2008-03-05 19:52:39 +03:00
vpd - > base . ops = & pci_vpd_pci22_ops ;
2008-12-18 20:17:16 +03:00
mutex_init ( & vpd - > lock ) ;
2008-03-05 19:52:39 +03:00
vpd - > cap = cap ;
vpd - > busy = false ;
dev - > vpd = & vpd - > base ;
return 0 ;
}
2008-12-18 20:17:16 +03:00
/**
 * pci_vpd_truncate - Set available Vital Product Data size
 * @dev:	pci device struct
 * @size:	available memory in bytes
 *
 * Adjust size of available VPD area.  The new size may only shrink the
 * area (it is limited by the access method's maximum), and the sysfs
 * attribute size is kept in sync when one is attached.
 */
int pci_vpd_truncate(struct pci_dev *dev, size_t size)
{
	struct pci_vpd *vpd = dev->vpd;

	if (!vpd)
		return -EINVAL;

	/* limited by the access method */
	if (size > vpd->len)
		return -EINVAL;

	vpd->len = size;
	if (vpd->attr)
		vpd->attr->size = size;

	return 0;
}
EXPORT_SYMBOL(pci_vpd_truncate);
2005-09-27 12:21:55 +04:00
/**
* pci_block_user_cfg_access - Block userspace PCI config reads / writes
* @ dev : pci device struct
*
2006-10-19 19:41:28 +04:00
* When user access is blocked , any reads or writes to config space will
* sleep until access is unblocked again . We don ' t allow nesting of
* block / unblock calls .
*/
2005-09-27 12:21:55 +04:00
void pci_block_user_cfg_access ( struct pci_dev * dev )
{
unsigned long flags ;
2006-10-19 19:41:28 +04:00
int was_blocked ;
2005-09-27 12:21:55 +04:00
spin_lock_irqsave ( & pci_lock , flags ) ;
2006-10-19 19:41:28 +04:00
was_blocked = dev - > block_ucfg_access ;
2005-09-27 12:21:55 +04:00
dev - > block_ucfg_access = 1 ;
spin_unlock_irqrestore ( & pci_lock , flags ) ;
2006-10-19 19:41:28 +04:00
/* If we BUG() inside the pci_lock, we're guaranteed to hose
* the machine */
BUG_ON ( was_blocked ) ;
2005-09-27 12:21:55 +04:00
}
EXPORT_SYMBOL_GPL ( pci_block_user_cfg_access ) ;
/**
* pci_unblock_user_cfg_access - Unblock userspace PCI config reads / writes
* @ dev : pci device struct
*
* This function allows userspace PCI config accesses to resume .
2006-10-19 19:41:28 +04:00
*/
2005-09-27 12:21:55 +04:00
void pci_unblock_user_cfg_access ( struct pci_dev * dev )
{
unsigned long flags ;
spin_lock_irqsave ( & pci_lock , flags ) ;
2006-10-19 19:41:28 +04:00
/* This indicates a problem in the caller, but we don't need
* to kill them , unlike a double - block above . */
WARN_ON ( ! dev - > block_ucfg_access ) ;
2005-09-27 12:21:55 +04:00
dev - > block_ucfg_access = 0 ;
2006-10-19 19:41:28 +04:00
wake_up_all ( & pci_ucfg_wait ) ;
2005-09-27 12:21:55 +04:00
spin_unlock_irqrestore ( & pci_lock , flags ) ;
}
EXPORT_SYMBOL_GPL ( pci_unblock_user_cfg_access ) ;