iio: buffer: fix coding style warnings
Just cosmetics. No functional change intended...

Signed-off-by: Nuno Sá <nuno.sa@analog.com>
Reviewed-by: Lars-Peter Clausen <lars@metafoo.de>
Link: https://lore.kernel.org/r/20230216101452.591805-4-nuno.sa@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
parent 9a96084182
commit 26e46ef775
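For reference, the patch applies two checkpatch-style conventions throughout the IIO buffer core: pointer checks use !ptr rather than explicit comparisons against NULL, and continuation lines are aligned with the opening parenthesis. A minimal sketch of the before/after pattern is shown below on hypothetical helpers (example_check_before/example_check_after are illustrative only and are not part of the patch):

#include <linux/errno.h>
#include <linux/iio/iio.h>

/* Before: explicit NULL comparison, continuation line not aligned. */
static int example_check_before(struct iio_dev *indio_dev,
        struct iio_buffer *buffer)
{
        if (indio_dev->info == NULL)
                return -ENODEV;

        return 0;
}

/* After: boolean pointer test, parameter aligned with the open parenthesis. */
static int example_check_after(struct iio_dev *indio_dev,
                               struct iio_buffer *buffer)
{
        if (!indio_dev->info)
                return -ENODEV;

        return 0;
}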
@@ -194,7 +194,7 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
         written = 0;
         add_wait_queue(&rb->pollq, &wait);
         do {
-                if (indio_dev->info == NULL)
+                if (!indio_dev->info)
                         return -ENODEV;
 
                 if (!iio_buffer_space_available(rb)) {
@@ -210,7 +210,7 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
                         }
 
                         wait_woken(&wait, TASK_INTERRUPTIBLE,
                                        MAX_SCHEDULE_TIMEOUT);
+                                   MAX_SCHEDULE_TIMEOUT);
                         continue;
                 }
 
@@ -242,7 +242,7 @@ static __poll_t iio_buffer_poll(struct file *filp,
         struct iio_buffer *rb = ib->buffer;
         struct iio_dev *indio_dev = ib->indio_dev;
 
-        if (!indio_dev->info || rb == NULL)
+        if (!indio_dev->info || !rb)
                 return 0;
 
         poll_wait(filp, &rb->pollq, wait);
@@ -407,9 +407,9 @@ static ssize_t iio_scan_el_show(struct device *dev,
 
 /* Note NULL used as error indicator as it doesn't make sense. */
 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
-                                        unsigned int masklength,
-                                        const unsigned long *mask,
-                                        bool strict)
+                                                unsigned int masklength,
+                                                const unsigned long *mask,
+                                                bool strict)
 {
         if (bitmap_empty(mask, masklength))
                 return NULL;
@@ -427,7 +427,7 @@ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
 }
 
 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
-        const unsigned long *mask)
+                                   const unsigned long *mask)
 {
         if (!indio_dev->setup_ops->validate_scan_mask)
                 return true;
@@ -446,7 +446,7 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
  * individual buffers request is plausible.
  */
 static int iio_scan_mask_set(struct iio_dev *indio_dev,
-                      struct iio_buffer *buffer, int bit)
+                             struct iio_buffer *buffer, int bit)
 {
         const unsigned long *mask;
         unsigned long *trialmask;
@@ -539,7 +539,6 @@ error_ret:
         mutex_unlock(&iio_dev_opaque->mlock);
 
         return ret < 0 ? ret : len;
-
 }
 
 static ssize_t iio_scan_el_ts_show(struct device *dev,
@@ -706,7 +705,7 @@ static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
 }
 
 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
-                                const unsigned long *mask, bool timestamp)
+                                  const unsigned long *mask, bool timestamp)
 {
         unsigned int bytes = 0;
         int length, i, largest = 0;
@@ -732,7 +731,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
 }
 
 static void iio_buffer_activate(struct iio_dev *indio_dev,
-        struct iio_buffer *buffer)
+                                struct iio_buffer *buffer)
 {
         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 
@@ -753,12 +752,12 @@ static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
         struct iio_buffer *buffer, *_buffer;
 
         list_for_each_entry_safe(buffer, _buffer,
-                        &iio_dev_opaque->buffer_list, buffer_list)
+                                 &iio_dev_opaque->buffer_list, buffer_list)
                 iio_buffer_deactivate(buffer);
 }
 
 static int iio_buffer_enable(struct iio_buffer *buffer,
-        struct iio_dev *indio_dev)
+                             struct iio_dev *indio_dev)
 {
         if (!buffer->access->enable)
                 return 0;
@@ -766,7 +765,7 @@ static int iio_buffer_enable(struct iio_buffer *buffer,
 }
 
 static int iio_buffer_disable(struct iio_buffer *buffer,
-        struct iio_dev *indio_dev)
+                              struct iio_dev *indio_dev)
 {
         if (!buffer->access->disable)
                 return 0;
@@ -774,7 +773,7 @@ static int iio_buffer_disable(struct iio_buffer *buffer,
 }
 
 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
-        struct iio_buffer *buffer)
+                                              struct iio_buffer *buffer)
 {
         unsigned int bytes;
 
@@ -782,13 +781,13 @@ static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
                 return;
 
         bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
-                buffer->scan_timestamp);
+                                       buffer->scan_timestamp);
 
         buffer->access->set_bytes_per_datum(buffer, bytes);
 }
 
 static int iio_buffer_request_update(struct iio_dev *indio_dev,
-        struct iio_buffer *buffer)
+                                     struct iio_buffer *buffer)
 {
         int ret;
 
@@ -797,7 +796,7 @@ static int iio_buffer_request_update(struct iio_dev *indio_dev,
                 ret = buffer->access->request_update(buffer);
                 if (ret) {
                         dev_dbg(&indio_dev->dev,
-                               "Buffer not started: buffer parameter update failed (%d)\n",
+                                "Buffer not started: buffer parameter update failed (%d)\n",
                                 ret);
                         return ret;
                 }
@@ -807,7 +806,7 @@ static int iio_buffer_request_update(struct iio_dev *indio_dev,
 }
 
 static void iio_free_scan_mask(struct iio_dev *indio_dev,
-        const unsigned long *mask)
+                               const unsigned long *mask)
 {
         /* If the mask is dynamically allocated free it, otherwise do nothing */
         if (!indio_dev->available_scan_masks)
@@ -823,8 +822,9 @@ struct iio_device_config {
 };
 
 static int iio_verify_update(struct iio_dev *indio_dev,
-        struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
-        struct iio_device_config *config)
+                             struct iio_buffer *insert_buffer,
+                             struct iio_buffer *remove_buffer,
+                             struct iio_device_config *config)
 {
         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         unsigned long *compound_mask;
@@ -864,7 +864,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
         if (insert_buffer) {
                 modes &= insert_buffer->access->modes;
                 config->watermark = min(config->watermark,
-                        insert_buffer->watermark);
+                                        insert_buffer->watermark);
         }
 
         /* Definitely possible for devices to support both of these. */
@@ -890,7 +890,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
 
         /* What scan mask do we actually have? */
         compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
-        if (compound_mask == NULL)
+        if (!compound_mask)
                 return -ENOMEM;
 
         scan_timestamp = false;
@@ -911,18 +911,18 @@ static int iio_verify_update(struct iio_dev *indio_dev,
 
         if (indio_dev->available_scan_masks) {
                 scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
-                                    indio_dev->masklength,
-                                    compound_mask,
-                                    strict_scanmask);
+                                                indio_dev->masklength,
+                                                compound_mask,
+                                                strict_scanmask);
                 bitmap_free(compound_mask);
-                if (scan_mask == NULL)
+                if (!scan_mask)
                         return -EINVAL;
         } else {
                 scan_mask = compound_mask;
         }
 
         config->scan_bytes = iio_compute_scan_bytes(indio_dev,
-                scan_mask, scan_timestamp);
+                                                    scan_mask, scan_timestamp);
         config->scan_mask = scan_mask;
         config->scan_timestamp = scan_timestamp;
 
@@ -954,16 +954,16 @@ static void iio_buffer_demux_free(struct iio_buffer *buffer)
 }
 
 static int iio_buffer_add_demux(struct iio_buffer *buffer,
-        struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
-        unsigned int length)
+                                struct iio_demux_table **p, unsigned int in_loc,
+                                unsigned int out_loc,
+                                unsigned int length)
 {
-
         if (*p && (*p)->from + (*p)->length == in_loc &&
-                (*p)->to + (*p)->length == out_loc) {
+            (*p)->to + (*p)->length == out_loc) {
                 (*p)->length += length;
         } else {
                 *p = kmalloc(sizeof(**p), GFP_KERNEL);
-                if (*p == NULL)
+                if (!(*p))
                         return -ENOMEM;
                 (*p)->from = in_loc;
                 (*p)->to = out_loc;
@@ -1027,7 +1027,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
                 out_loc += length;
         }
         buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
-        if (buffer->demux_bounce == NULL) {
+        if (!buffer->demux_bounce) {
                 ret = -ENOMEM;
                 goto error_clear_mux_table;
         }
@@ -1060,7 +1060,7 @@ error_clear_mux_table:
 }
 
 static int iio_enable_buffers(struct iio_dev *indio_dev,
-        struct iio_device_config *config)
+                              struct iio_device_config *config)
 {
         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         struct iio_buffer *buffer, *tmp = NULL;
@@ -1078,7 +1078,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
                 ret = indio_dev->setup_ops->preenable(indio_dev);
                 if (ret) {
                         dev_dbg(&indio_dev->dev,
-                           "Buffer not started: buffer preenable failed (%d)\n", ret);
+                                "Buffer not started: buffer preenable failed (%d)\n", ret);
                         goto err_undo_config;
                 }
         }
@@ -1118,7 +1118,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
                 ret = indio_dev->setup_ops->postenable(indio_dev);
                 if (ret) {
                         dev_dbg(&indio_dev->dev,
-                           "Buffer not started: postenable failed (%d)\n", ret);
+                                "Buffer not started: postenable failed (%d)\n", ret);
                         goto err_detach_pollfunc;
                 }
         }
@@ -1194,15 +1194,15 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
 }
 
 static int __iio_update_buffers(struct iio_dev *indio_dev,
-                       struct iio_buffer *insert_buffer,
-                       struct iio_buffer *remove_buffer)
+                                struct iio_buffer *insert_buffer,
+                                struct iio_buffer *remove_buffer)
 {
         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         struct iio_device_config new_config;
         int ret;
 
         ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
-                &new_config);
+                                &new_config);
         if (ret)
                 return ret;
 
@@ -1258,7 +1258,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
                 return 0;
 
         if (insert_buffer &&
-            (insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT))
+            insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
                 return -EINVAL;
 
         mutex_lock(&iio_dev_opaque->info_exist_lock);
@@ -1275,7 +1275,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
                 goto out_unlock;
         }
 
-        if (indio_dev->info == NULL) {
+        if (!indio_dev->info) {
                 ret = -ENODEV;
                 goto out_unlock;
         }
@@ -1615,7 +1615,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
 
         buffer_attrcount = 0;
         if (buffer->attrs) {
-                while (buffer->attrs[buffer_attrcount] != NULL)
+                while (buffer->attrs[buffer_attrcount])
                         buffer_attrcount++;
         }
         buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
@@ -1643,7 +1643,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
                 }
 
                 ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
-                                                 &channels[i]);
+                                                   &channels[i]);
                 if (ret < 0)
                         goto error_cleanup_dynamic;
                 scan_el_attrcount += ret;
@@ -1651,10 +1651,10 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
                         iio_dev_opaque->scan_index_timestamp =
                                 channels[i].scan_index;
         }
-        if (indio_dev->masklength && buffer->scan_mask == NULL) {
+        if (indio_dev->masklength && !buffer->scan_mask) {
                 buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
                                                   GFP_KERNEL);
-                if (buffer->scan_mask == NULL) {
+                if (!buffer->scan_mask) {
                         ret = -ENOMEM;
                         goto error_cleanup_dynamic;
                 }
@@ -1771,7 +1771,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
                         goto error_unwind_sysfs_and_mask;
         }
 
-        sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
+        sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
         iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
         if (!iio_dev_opaque->buffer_ioctl_handler) {
                 ret = -ENOMEM;
@@ -1820,14 +1820,14 @@ void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
  * a time.
  */
 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
-        const unsigned long *mask)
+                                   const unsigned long *mask)
 {
         return bitmap_weight(mask, indio_dev->masklength) == 1;
 }
 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
 
 static const void *iio_demux(struct iio_buffer *buffer,
-        const void *datain)
+                             const void *datain)
 {
         struct iio_demux_table *t;
 