iio: buffer-dma: Enable buffer write support
Adding write support to the buffer-dma code is easy - the write() function basically needs to do the exact same thing as the read() function: dequeue a block, read or write the data, enqueue the block when entirely processed. Therefore, the iio_buffer_dma_read() and the new iio_buffer_dma_write() now both call a function iio_buffer_dma_io(), which will perform this task. Note that we preemptively reset block->bytes_used to the buffer's size in iio_dma_buffer_request_update(), as in the future the iio_dma_buffer_enqueue() function won't reset it. Signed-off-by: Paul Cercueil <paul@crapouillou.net> Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com> Signed-off-by: Nuno Sa <nuno.sa@analog.com> Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-3-5ca45b4de294@analog.com Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
This commit is contained in:
parent
04ae3b1a76
commit
fb09febafd
@ -195,6 +195,18 @@ static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
|
||||
block->state = IIO_BLOCK_STATE_DONE;
|
||||
}
|
||||
|
||||
static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
|
||||
{
|
||||
__poll_t flags;
|
||||
|
||||
if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
|
||||
flags = EPOLLIN | EPOLLRDNORM;
|
||||
else
|
||||
flags = EPOLLOUT | EPOLLWRNORM;
|
||||
|
||||
wake_up_interruptible_poll(&queue->buffer.pollq, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* iio_dma_buffer_block_done() - Indicate that a block has been completed
|
||||
* @block: The completed block
|
||||
@ -212,7 +224,7 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
|
||||
spin_unlock_irqrestore(&queue->list_lock, flags);
|
||||
|
||||
iio_buffer_block_put_atomic(block);
|
||||
wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
|
||||
iio_dma_buffer_queue_wake(queue);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
|
||||
|
||||
@ -241,7 +253,7 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
|
||||
}
|
||||
spin_unlock_irqrestore(&queue->list_lock, flags);
|
||||
|
||||
wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
|
||||
iio_dma_buffer_queue_wake(queue);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
|
||||
|
||||
@ -335,8 +347,24 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
|
||||
queue->fileio.blocks[i] = block;
|
||||
}
|
||||
|
||||
block->state = IIO_BLOCK_STATE_QUEUED;
|
||||
list_add_tail(&block->head, &queue->incoming);
|
||||
/*
|
||||
* block->bytes_used may have been modified previously, e.g. by
|
||||
* iio_dma_buffer_block_list_abort(). Reset it here to the
|
||||
* block's so that iio_dma_buffer_io() will work.
|
||||
*/
|
||||
block->bytes_used = block->size;
|
||||
|
||||
/*
|
||||
* If it's an input buffer, mark the block as queued, and
|
||||
* iio_dma_buffer_enable() will submit it. Otherwise mark it as
|
||||
* done, which means it's ready to be dequeued.
|
||||
*/
|
||||
if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
|
||||
block->state = IIO_BLOCK_STATE_QUEUED;
|
||||
list_add_tail(&block->head, &queue->incoming);
|
||||
} else {
|
||||
block->state = IIO_BLOCK_STATE_DONE;
|
||||
}
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
@ -488,20 +516,12 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
|
||||
return block;
|
||||
}
|
||||
|
||||
/**
|
||||
* iio_dma_buffer_read() - DMA buffer read callback
|
||||
* @buffer: Buffer to read from
|
||||
* @n: Number of bytes to read
|
||||
* @user_buffer: Userspace buffer to copy the data to
|
||||
*
|
||||
* Should be used as the read callback for iio_buffer_access_ops
|
||||
* struct for DMA buffers.
|
||||
*/
|
||||
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
|
||||
char __user *user_buffer)
|
||||
static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n,
|
||||
char __user *user_buffer, bool is_from_user)
|
||||
{
|
||||
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
|
||||
struct iio_dma_buffer_block *block;
|
||||
void *addr;
|
||||
int ret;
|
||||
|
||||
if (n < buffer->bytes_per_datum)
|
||||
@ -524,8 +544,13 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
|
||||
n = rounddown(n, buffer->bytes_per_datum);
|
||||
if (n > block->bytes_used - queue->fileio.pos)
|
||||
n = block->bytes_used - queue->fileio.pos;
|
||||
addr = block->vaddr + queue->fileio.pos;
|
||||
|
||||
if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
|
||||
if (is_from_user)
|
||||
ret = copy_from_user(addr, user_buffer, n);
|
||||
else
|
||||
ret = copy_to_user(user_buffer, addr, n);
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
goto out_unlock;
|
||||
}
|
||||
@ -544,8 +569,40 @@ out_unlock:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* iio_dma_buffer_read() - DMA buffer read callback
|
||||
* @buffer: Buffer to read from
|
||||
* @n: Number of bytes to read
|
||||
* @user_buffer: Userspace buffer to copy the data to
|
||||
*
|
||||
* Should be used as the read callback for iio_buffer_access_ops
|
||||
* struct for DMA buffers.
|
||||
*/
|
||||
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
|
||||
char __user *user_buffer)
|
||||
{
|
||||
return iio_dma_buffer_io(buffer, n, user_buffer, false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
|
||||
|
||||
/**
|
||||
* iio_dma_buffer_write() - DMA buffer write callback
|
||||
* @buffer: Buffer to write to
|
||||
* @n: Number of bytes to write
|
||||
* @user_buffer: Userspace buffer to copy the data from
|
||||
*
|
||||
* Should be used as the write callback for iio_buffer_access_ops
|
||||
* struct for DMA buffers.
|
||||
*/
|
||||
int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
|
||||
const char __user *user_buffer)
|
||||
{
|
||||
return iio_dma_buffer_io(buffer, n,
|
||||
(__force __user char *)user_buffer, true);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iio_dma_buffer_write);
|
||||
|
||||
/**
|
||||
* iio_dma_buffer_usage() - DMA buffer data_available and
|
||||
* space_available callback
|
||||
|
@ -132,6 +132,8 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer,
|
||||
struct iio_dev *indio_dev);
|
||||
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
|
||||
char __user *user_buffer);
|
||||
int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
|
||||
const char __user *user_buffer);
|
||||
size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
|
||||
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
|
||||
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
|
||||
|
Loading…
x
Reference in New Issue
Block a user