// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, eg.:
 *
 *	static struct platform_device v2m_virtio_device = {
 *		.name = "virtio-mmio",
 *		.id = -1,
 *		.num_resources = 2,
 *		.resource = (struct resource []) {
 *			{
 *				.start = 0x1001e000,
 *				.end = 0x1001e0ff,
 *				.flags = IORESOURCE_MEM,
 *			}, {
 *				.start = 42 + 32,
 *				.end = 42 + 32,
 *				.flags = IORESOURCE_IRQ,
 *			},
 *		}
 *	};
 *
 * 2. Device Tree node, eg.:
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	}
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *		[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *		<size>     := size (can use standard suffixes like K, M or G)
 *		<baseaddr> := physical base address
 *		<irq>      := interrupt number (as passed to request_irq())
 *		<id>       := (optional) platform device id
 *    eg.:
 *		virtio_mmio.device=0x100@0x100b0000:48 \
 *				virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 */

#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
#include <linux/virtio_ring.h>


/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE



#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};

/* Configuration interface */
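
/*
 * The 64-bit device feature word is exposed as two 32-bit halves: writing
 * DEVICE_FEATURES_SEL selects which half the DEVICE_FEATURES register
 * returns, so read the high word (selector 1) followed by the low word
 * (selector 0).
 */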
static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}
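
/*
 * The negotiated driver features are written back the same way: the high
 * word with DRIVER_FEATURES_SEL = 1, then the low word with
 * DRIVER_FEATURES_SEL = 0. Version 2 devices additionally require the
 * driver to accept VIRTIO_F_VERSION_1.
 */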
static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}
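
/*
 * Config space accessors: legacy (version 1) devices expose guest-endian
 * fields which are accessed byte by byte, while version 2 devices expose
 * little-endian fields accessed with a single read/write of the exact
 * field width.
 */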
static void vm_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(base + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

static void vm_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vm_generation(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}



/* Transport interface */

/* the notify function used when creating a virt queue */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}
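
/*
 * When VIRTIO_F_NOTIFICATION_DATA has been negotiated, the notify write
 * carries the packed "notification data" built by vring_notification_data()
 * rather than just the queue index.
 */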
static bool vm_notify_with_data(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	u32 data = vring_notification_data(vq);

	writel(data, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);

	return true;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}

static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}

static void vm_synchronize_cbs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
}
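
/*
 * Queue setup: select the queue, check that it is not already in use,
 * create the vring, then activate it. Legacy (version 1) devices are told
 * the ring's page frame number and alignment via QUEUE_PFN/QUEUE_ALIGN;
 * version 2 devices get the 64-bit descriptor, available and used ring
 * addresses as LOW/HIGH register pairs, followed by a write to QUEUE_READY.
 */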
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name, bool ctx)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	bool (*notify)(struct virtqueue *vq);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA))
		notify = vm_notify_with_data;
	else
		notify = vm_notify;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				    true, true, ctx, notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->num_max = num;

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32 bit QUEUE PFN. If we have something
		 * that doesn't fit in 32 bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char * const names[],
		       const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err, queue_idx = 0;

	if (irq < 0)
		return irq;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
		enable_irq_wake(irq);

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

static bool vm_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 len, addr;

	/* Select the region we're interested in */
	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);

	/* Read the region size */
	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;

	region->len = len;

	/* Check if region length is -1. If that's the case, the shared memory
	 * region does not exist and there is no need to proceed further.
	 */
	if (len == ~(u64)0)
		return false;

	/* Read the region base address */
	addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
	addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;

	region->addr = addr;

	return true;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.generation	= vm_generation,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name	= vm_bus_name,
	.get_shm_region = vm_get_shm_region,
	.synchronize_cbs = vm_synchronize_cbs,
};

#ifdef CONFIG_PM_SLEEP
static int virtio_mmio_freeze(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	return virtio_device_freeze(&vm_dev->vdev);
}
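
/*
 * Legacy (version 1) devices need GUEST_PAGE_SIZE to be programmed again
 * before virtio_device_restore() re-activates their queues; outside of this
 * path the register is only written once at probe time.
 */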
static int virtio_mmio_restore(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	return virtio_device_restore(&vm_dev->vdev);
}

static const struct dev_pm_ops virtio_mmio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
};
#endif

static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	kfree(vm_dev);
}

/* Platform device */

static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	unsigned long magic;
	int rc;

	vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vm_dev->base)) {
		rc = PTR_ERR(vm_dev->base);
		goto free_vm_dev;
	}

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		rc = -ENODEV;
		goto free_vm_dev;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		rc = -ENXIO;
		goto free_vm_dev;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		rc = -ENODEV;
		goto free_vm_dev;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressible as a 32-bit PFN.
		 */
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					      DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	platform_set_drvdata(pdev, vm_dev);

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);

	return rc;

free_vm_dev:
	kfree(vm_dev);
	return rc;
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}



/* Devices list parameter */

#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'
	 */
	if (processed < 2 || str[consumed] || irq == 0)
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			put_device(&vm_cmdline_parent);
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		       vm_cmdline_id,
		       (unsigned long long)resources[0].start,
		       (unsigned long long)resources[0].end,
		       (int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}
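
/*
 * Reading the module parameter back lists every device registered this way,
 * one per line, in the same <size>@<baseaddr>:<irq>:<id> syntax that is
 * accepted when writing it.
 */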
static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}

static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};
device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);

static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif



/* Platform driver */

static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id virtio_mmio_acpi_match[] = {
	{ "LNRO0005", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
#endif

static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= virtio_mmio_remove,
	.driver		= {
		.name	= "virtio-mmio",
		.of_match_table	= virtio_mmio_match,
		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
#ifdef CONFIG_PM_SLEEP
		.pm	= &virtio_mmio_pm_ops,
#endif
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");