[media] v4l2-event/ctrls/fh: allocate events per fh and per type instead of just per-fh
The driver had to decide how many events to allocate when the v4l2_fh struct was created. It was possible to add more events afterwards, but there was no way to ensure that you wouldn't miss important events if the event queue filled up for that filehandle. In addition, once there were no more free events, any new events were simply dropped on the floor.

For the control event in particular this made life very difficult, since control status/value changes could simply be missed if the number of allocated events and the speed at which the application read events were too low to keep up with the number of generated events. The application would have no idea what the latest state of a control was, since it could have missed the latest control change.

So this patch makes some major changes in how events are allocated. Instead of allocating events per filehandle, they are now allocated when subscribing an event: for that particular event type, N events (determined by the driver) are allocated and reserved for that event type. This ensures that you will not miss events of a particular type altogether. In addition, if all N events are in use and a new event is raised, then the oldest event is dropped and the new one is added, so the latest event is always available.

This can be further improved by adding the ability to merge the state of two events, ensuring that no data is lost at all. That will be added in the next patch. This change also makes it possible to let the user determine the number of events to allocate; this is not implemented at the moment, but would be trivial.

Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
This commit is contained in:
parent 77068d36d8
commit f1e393de38
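
The practical effect for drivers is that the event queue depth is now chosen per event type at subscription time instead of per filehandle at open time. Below is a minimal sketch (not part of this patch; MY_NEVENTS and my_subscribe_event() are placeholder names) of what a driver's VIDIOC_SUBSCRIBE_EVENT handler looks like with the new three-argument v4l2_event_subscribe():

#define MY_NEVENTS 8    /* placeholder: per-type queue depth chosen by the driver */

static int my_subscribe_event(struct v4l2_fh *fh,
                              struct v4l2_event_subscription *sub)
{
        switch (sub->type) {
        case V4L2_EVENT_EOS:
                /* Reserve MY_NEVENTS slots just for EOS events. */
                return v4l2_event_subscribe(fh, sub, MY_NEVENTS);
        case V4L2_EVENT_CTRL:
                /* Passing 0 lets the core fall back to its minimum of 1. */
                return v4l2_event_subscribe(fh, sub, 0);
        default:
                return -EINVAL;
        }
}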
@@ -966,10 +966,6 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
return -ENOMEM;
}
v4l2_fh_init(&item->fh, s->vdev);
if (s->type == IVTV_DEC_STREAM_TYPE_YUV ||
s->type == IVTV_DEC_STREAM_TYPE_MPG) {
res = v4l2_event_alloc(&item->fh, 60);
}
if (res < 0) {
v4l2_fh_exit(&item->fh);
kfree(item);
@@ -1450,13 +1450,11 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscripti
switch (sub->type) {
case V4L2_EVENT_VSYNC:
case V4L2_EVENT_EOS:
break;
case V4L2_EVENT_CTRL:
return v4l2_ctrl_subscribe_fh(fh, sub, 0);
return v4l2_event_subscribe(fh, sub, 0);
default:
return -EINVAL;
}
return v4l2_event_subscribe(fh, sub);
}

static int ivtv_log_status(struct file *file, void *fh)
@@ -1691,7 +1691,7 @@ static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
if (sub->type != V4L2_EVENT_OMAP3ISP_HS_VS)
return -EINVAL;

return v4l2_event_subscribe(fh, sub);
return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS);
}

static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
@@ -2162,7 +2162,6 @@ static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
sd->grp_id = 1 << 16; /* group ID for isp subdevs */
v4l2_set_subdevdata(sd, ccdc);
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
sd->nevents = OMAP3ISP_CCDC_NEVENTS;

pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
@@ -1032,7 +1032,6 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
subdev->nevents = STAT_NEVENTS;
v4l2_set_subdevdata(subdev, stat);

stat->pad.flags = MEDIA_PAD_FL_SINK;
@@ -1050,7 +1049,7 @@ int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
if (sub->type != stat->event_type)
return -EINVAL;

return v4l2_event_subscribe(fh, sub);
return v4l2_event_subscribe(fh, sub, STAT_NEVENTS);
}

int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
@@ -1011,7 +1011,6 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
insertion is an O(1) operation. */
if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
list_add_tail(&new_ref->node, &hdl->ctrl_refs);
hdl->nr_of_refs++;
goto insert_in_hash;
}

@@ -2051,20 +2050,3 @@ void v4l2_ctrl_del_event(struct v4l2_ctrl *ctrl,
v4l2_ctrl_unlock(ctrl);
}
EXPORT_SYMBOL(v4l2_ctrl_del_event);

int v4l2_ctrl_subscribe_fh(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub, unsigned n)
{
struct v4l2_ctrl_handler *hdl = fh->ctrl_handler;
int ret = 0;

if (!ret) {
if (hdl->nr_of_refs * 2 > n)
n = hdl->nr_of_refs * 2;
ret = v4l2_event_alloc(fh, n);
}
if (!ret)
ret = v4l2_event_subscribe(fh, sub);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_subscribe_fh);
@@ -30,44 +30,11 @@
#include <linux/sched.h>
#include <linux/slab.h>

static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh);

int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
unsigned long flags;

while (fh->nallocated < n) {
struct v4l2_kevent *kev;

kev = kzalloc(sizeof(*kev), GFP_KERNEL);
if (kev == NULL)
return -ENOMEM;

spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_add_tail(&kev->list, &fh->free);
fh->nallocated++;
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}

return 0;
idx += sev->first;
return idx >= sev->elems ? idx - sev->elems : idx;
}
EXPORT_SYMBOL_GPL(v4l2_event_alloc);

#define list_kfree(list, type, member) \
while (!list_empty(list)) { \
type *hi; \
hi = list_first_entry(list, type, member); \
list_del(&hi->member); \
kfree(hi); \
}

void v4l2_event_free(struct v4l2_fh *fh)
{
list_kfree(&fh->free, struct v4l2_kevent, list);
list_kfree(&fh->available, struct v4l2_kevent, list);
v4l2_event_unsubscribe_all(fh);
}
EXPORT_SYMBOL_GPL(v4l2_event_free);

static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
@@ -84,11 +51,13 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
WARN_ON(fh->navailable == 0);

kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
list_move(&kev->list, &fh->free);
list_del(&kev->list);
fh->navailable--;

kev->event.pending = fh->navailable;
*event = kev->event;
kev->sev->first = sev_pos(kev->sev, 1);
kev->sev->in_use--;

spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

@@ -154,17 +123,24 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
fh->sequence++;

/* Do we have any free events? */
if (list_empty(&fh->free))
return;
if (sev->in_use == sev->elems) {
/* no, remove the oldest one */
kev = sev->events + sev_pos(sev, 0);
list_del(&kev->list);
sev->in_use--;
sev->first = sev_pos(sev, 1);
fh->navailable--;
}

/* Take one and fill it. */
kev = list_first_entry(&fh->free, struct v4l2_kevent, list);
kev = sev->events + sev_pos(sev, sev->in_use);
kev->event.type = ev->type;
kev->event.u = ev->u;
kev->event.id = ev->id;
kev->event.timestamp = *ts;
kev->event.sequence = fh->sequence;
list_move_tail(&kev->list, &fh->available);
sev->in_use++;
list_add_tail(&kev->list, &fh->available);

fh->navailable++;

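The queueing hunk above turns each subscription's events[] array into a small ring buffer: sev_pos() wraps first + idx around elems, and once in_use == elems the oldest slot is recycled for the incoming event. A standalone sketch of just that index arithmetic (plain C with made-up values, not kernel code):

#include <assert.h>

/* Mirrors the sev_pos() wrap-around: position idx relative to the
 * oldest entry 'first', modulo the ring size 'elems'. */
static unsigned ring_pos(unsigned first, unsigned elems, unsigned idx)
{
        idx += first;
        return idx >= elems ? idx - elems : idx;
}

int main(void)
{
        /* A ring of 3 slots whose oldest entry sits at index 2. */
        assert(ring_pos(2, 3, 0) == 2); /* oldest queued event */
        assert(ring_pos(2, 3, 1) == 0); /* wraps to the start */
        assert(ring_pos(2, 3, 2) == 1); /* newest slot */
        return 0;
}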
@@ -209,38 +185,39 @@ int v4l2_event_pending(struct v4l2_fh *fh)
EXPORT_SYMBOL_GPL(v4l2_event_pending);

int v4l2_event_subscribe(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
struct v4l2_event_subscription *sub, unsigned elems)
{
struct v4l2_subscribed_event *sev, *found_ev;
struct v4l2_ctrl *ctrl = NULL;
unsigned long flags;
unsigned i;

if (elems < 1)
elems = 1;
if (sub->type == V4L2_EVENT_CTRL) {
ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
if (ctrl == NULL)
return -EINVAL;
}

sev = kzalloc(sizeof(*sev), GFP_KERNEL);
sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
if (!sev)
return -ENOMEM;
for (i = 0; i < elems; i++)
sev->events[i].sev = sev;
sev->type = sub->type;
sev->id = sub->id;
sev->flags = sub->flags;
sev->fh = fh;
sev->elems = elems;

spin_lock_irqsave(&fh->vdev->fh_lock, flags);

found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
if (!found_ev) {
INIT_LIST_HEAD(&sev->list);
sev->type = sub->type;
sev->id = sub->id;
sev->fh = fh;
sev->flags = sub->flags;

if (!found_ev)
list_add(&sev->list, &fh->subscribed);
}

spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

/* v4l2_ctrl_add_fh uses a mutex, so do this outside the spin lock */
/* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
if (found_ev)
kfree(sev);
else if (ctrl)
@@ -250,7 +227,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
struct v4l2_event_subscription sub;
struct v4l2_subscribed_event *sev;
@@ -271,6 +248,7 @@ static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
v4l2_event_unsubscribe(fh, &sub);
} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
@@ -37,9 +37,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
INIT_LIST_HEAD(&fh->list);
set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
fh->prio = V4L2_PRIORITY_UNSET;

init_waitqueue_head(&fh->wait);
INIT_LIST_HEAD(&fh->free);
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
@@ -88,7 +86,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
{
if (fh->vdev == NULL)
return;
v4l2_event_free(fh);
v4l2_event_unsubscribe_all(fh);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
@@ -76,13 +76,6 @@ static int subdev_open(struct file *file)
}

v4l2_fh_init(&subdev_fh->vfh, vdev);

if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) {
ret = v4l2_event_alloc(&subdev_fh->vfh, sd->nevents);
if (ret)
goto err;
}

v4l2_fh_add(&subdev_fh->vfh);
file->private_data = &subdev_fh->vfh;
#if defined(CONFIG_MEDIA_CONTROLLER)
@@ -993,7 +993,7 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
{
switch (sub->type) {
case V4L2_EVENT_CTRL:
return v4l2_ctrl_subscribe_fh(fh, sub, 0);
return v4l2_event_subscribe(fh, sub, 0);
default:
return -EINVAL;
}
@@ -124,18 +124,12 @@ uvc_v4l2_open(struct file *file)
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle;
int ret;

handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (handle == NULL)
return -ENOMEM;

v4l2_fh_init(&handle->vfh, vdev);

ret = v4l2_event_alloc(&handle->vfh, 8);
if (ret < 0)
goto error;

v4l2_fh_add(&handle->vfh);

handle->device = &uvc->video;
@@ -143,10 +137,6 @@ uvc_v4l2_open(struct file *file)

uvc_function_connect(uvc);
return 0;

error:
v4l2_fh_exit(&handle->vfh);
return ret;
}

static int
@@ -308,7 +298,7 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
return -EINVAL;

return v4l2_event_subscribe(&handle->vfh, arg);
return v4l2_event_subscribe(&handle->vfh, arg, 2);
}

case VIDIOC_UNSUBSCRIBE_EVENT:
@@ -171,7 +171,6 @@ struct v4l2_ctrl_ref {
* control is needed multiple times, so this is a simple
* optimization.
* @buckets: Buckets for the hashing. Allows for quick control lookup.
* @nr_of_refs: Total number of control references in the list.
* @nr_of_buckets: Total number of buckets in the array.
* @error: The error code of the first failed control addition.
*/
@@ -181,7 +180,6 @@ struct v4l2_ctrl_handler {
struct list_head ctrl_refs;
struct v4l2_ctrl_ref *cached;
struct v4l2_ctrl_ref **buckets;
u16 nr_of_refs;
u16 nr_of_buckets;
int error;
};
@@ -499,23 +497,6 @@ void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
void v4l2_ctrl_del_event(struct v4l2_ctrl *ctrl,
struct v4l2_subscribed_event *sev);

/** v4l2_ctrl_subscribe_fh() - Helper function that subscribes a control event.
* @fh: The file handler that subscribed the control event.
* @sub: The event to subscribe (type must be V4L2_EVENT_CTRL).
* @n: How many events should be allocated? (Passed to v4l2_event_alloc).
* Recommended to set to twice the number of controls plus whatever
* is needed for other events. This function will set n to
* max(n, 2 * fh->ctrl_handler->nr_of_refs).
*
* A helper function that initializes the fh for events, allocates the
* list of events and subscribes the control event.
*
* Typically called in the handler of VIDIOC_SUBSCRIBE_EVENT in the
* V4L2_EVENT_CTRL case.
*/
int v4l2_ctrl_subscribe_fh(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub, unsigned n);

/* Helpers for ioctl_ops. If hdl == NULL then they will all return -EINVAL. */
int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc);
int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm);
@@ -30,10 +30,15 @@
#include <linux/wait.h>

struct v4l2_fh;
struct v4l2_subscribed_event;
struct video_device;

struct v4l2_kevent {
/* list node for the v4l2_fh->available list */
struct list_head list;
/* pointer to parent v4l2_subscribed_event */
struct v4l2_subscribed_event *sev;
/* event itself */
struct v4l2_event event;
};

@@ -50,18 +55,25 @@ struct v4l2_subscribed_event {
struct v4l2_fh *fh;
/* list node that hooks into the object's event list (if there is one) */
struct list_head node;
/* the number of elements in the events array */
unsigned elems;
/* the index of the events containing the oldest available event */
unsigned first;
/* the number of queued events */
unsigned in_use;
/* an array of elems events */
struct v4l2_kevent events[];
};

int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n);
void v4l2_event_free(struct v4l2_fh *fh);
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
int nonblocking);
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev);
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev);
int v4l2_event_pending(struct v4l2_fh *fh);
int v4l2_event_subscribe(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
struct v4l2_event_subscription *sub, unsigned elems);
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh);

#endif /* V4L2_EVENT_H */
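The events[] member declared above is a C99 flexible array member, which is why the subscribe path allocates sizeof(*sev) + sizeof(struct v4l2_kevent) * elems in a single kzalloc(). A small userspace sketch of the same allocation pattern (struct and function names here are illustrative, not the kernel's):

#include <stdlib.h>

struct kev {
        int type;
};

struct subscribed {
        unsigned elems;         /* number of entries in events[] */
        unsigned first;         /* index of the oldest queued event */
        unsigned in_use;        /* number of queued events */
        struct kev events[];    /* flexible array member, sized at allocation */
};

/* One allocation covers the header plus all 'elems' ring slots,
 * mirroring the kzalloc() call in the subscribe hunk above. */
static struct subscribed *subscribed_alloc(unsigned elems)
{
        struct subscribed *sev;

        sev = calloc(1, sizeof(*sev) + sizeof(struct kev) * elems);
        if (sev)
                sev->elems = elems;
        return sev;
}

int main(void)
{
        struct subscribed *sev = subscribed_alloc(8);

        free(sev);
        return 0;
}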
@@ -40,10 +40,8 @@ struct v4l2_fh {
/* Events */
wait_queue_head_t wait;
struct list_head subscribed; /* Subscribed events */
struct list_head free; /* Events ready for use */
struct list_head available; /* Dequeueable event */
unsigned int navailable;
unsigned int nallocated; /* Number of allocated events */
u32 sequence;
};

@@ -513,8 +513,6 @@ struct v4l2_subdev {
void *host_priv;
/* subdev device node */
struct video_device devnode;
/* number of events to be allocated on open */
unsigned int nevents;
};

#define media_entity_to_v4l2_subdev(ent) \