mirror of https://github.com/systemd/systemd.git synced 2024-12-25 01:34:28 +03:00

Merge pull request #14376 from poettering/sd-event-no-stack

sd-event: don't use stack for event queue array
Lennart Poettering 2019-12-18 17:18:07 +01:00 committed by GitHub
commit ecb040643d
2 changed files with 20 additions and 17 deletions
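The change addresses a stack-usage concern: sd_event_wait() used to allocate its epoll_event array on the stack via newa(), sized from the number of registered event sources, so a loop with very many sources made every wait call consume a correspondingly large stack frame. After this commit the array lives on the sd_event object itself and is grown on demand with GREEDY_REALLOC. As a rough sketch of that grow-on-demand idiom only (hypothetical names, not systemd's actual macro or API):

#include <errno.h>
#include <stdlib.h>
#include <sys/epoll.h>

/* Illustrative stand-in for the grow-on-demand idiom: keep the buffer and its
 * allocated capacity together, and reallocate only when the requested element
 * count exceeds what is already there. my_event_loop and
 * event_queue_ensure_allocated are made-up names for this sketch. */
struct my_event_loop {
        struct epoll_event *event_queue;
        size_t event_queue_allocated;        /* capacity in elements, not bytes */
};

static int event_queue_ensure_allocated(struct my_event_loop *e, size_t need) {
        struct epoll_event *q;

        if (e->event_queue_allocated >= need)
                return 0;                     /* big enough already, reuse as-is */

        q = realloc(e->event_queue, need * sizeof(struct epoll_event));
        if (!q)
                return -ENOMEM;               /* old buffer stays valid on failure */

        e->event_queue = q;
        e->event_queue_allocated = need;
        return 0;
}

Because the buffer hangs off the event loop object it is reused across sd_event_wait() calls and only ever grows; event_free() then releases it exactly once, which is what the free(e->event_queue) hunk below adds.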

TODO

@@ -37,8 +37,6 @@ Features:
   waitid() only on the children with the highest priority until one is waitable
   and ignore all lower-prio ones from that point on
 
-* sd-event: drop stack allocated epoll_event buffer in sd_event_wait()
-
 * maybe introduce xattrs that can be set on the root dir of the root fs
   partition that declare the volatility mode to use the image in. Previously I
   thought marking this via GPT partition flags but that's not ideal since
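The TODO entry removed above tracked exactly this problem. For contrast, a hypothetical reduction of the removed pattern (a plain C variable-length array standing in for systemd's newa() helper) shows why the per-call stack usage is unbounded:

#include <stddef.h>
#include <sys/epoll.h>

/* Hypothetical reduction of the old pattern, for illustration only: the event
 * array lives in the stack frame of the wait function, so its size, and the
 * stack consumed by every call, scales with the number of event sources. */
int wait_once_on_stack(int epoll_fd, size_t n_sources, int timeout_msec) {
        size_t max = n_sources > 0 ? n_sources : 1;
        struct epoll_event events[max];       /* VLA: grows with n_sources */

        return epoll_wait(epoll_fd, events, (int) max, timeout_msec);
}

On threads with small stacks, or with a very large number of sources, such a frame can exhaust the stack, which is what the heap-backed buffer in the sd-event.c diff below avoids.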

src/libsystemd/sd-event/sd-event.c

@@ -115,6 +115,9 @@ struct sd_event {
         unsigned n_sources;
 
+        struct epoll_event *event_queue;
+        size_t event_queue_allocated;
+
         LIST_HEAD(sd_event_source, sources);
 
         usec_t last_run, last_log;
@@ -286,6 +289,8 @@ static sd_event *event_free(sd_event *e) {
         hashmap_free(e->child_sources);
         set_free(e->post_sources);
 
+        free(e->event_queue);
+
         return mfree(e);
 }
@@ -3477,8 +3482,7 @@ pending:
 }
 
 _public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
-        struct epoll_event *ev_queue;
-        unsigned ev_queue_max;
+        size_t event_queue_max;
         int r, m, i;
 
         assert_return(e, -EINVAL);
@@ -3492,14 +3496,15 @@ _public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
                 return 1;
         }
 
-        ev_queue_max = MAX(e->n_sources, 1u);
-        ev_queue = newa(struct epoll_event, ev_queue_max);
+        event_queue_max = MAX(e->n_sources, 1u);
+        if (!GREEDY_REALLOC(e->event_queue, e->event_queue_allocated, event_queue_max))
+                return -ENOMEM;
 
         /* If we still have inotify data buffered, then query the other fds, but don't wait on it */
         if (e->inotify_data_buffered)
                 timeout = 0;
 
-        m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
+        m = epoll_wait(e->epoll_fd, e->event_queue, event_queue_max,
                        timeout == (uint64_t) -1 ? -1 : (int) DIV_ROUND_UP(timeout, USEC_PER_MSEC));
         if (m < 0) {
                 if (errno == EINTR) {
@@ -3515,26 +3520,26 @@ _public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
         for (i = 0; i < m; i++) {
 
-                if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
-                        r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
+                if (e->event_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
+                        r = flush_timer(e, e->watchdog_fd, e->event_queue[i].events, NULL);
                 else {
-                        WakeupType *t = ev_queue[i].data.ptr;
+                        WakeupType *t = e->event_queue[i].data.ptr;
 
                         switch (*t) {
 
                         case WAKEUP_EVENT_SOURCE: {
-                                sd_event_source *s = ev_queue[i].data.ptr;
+                                sd_event_source *s = e->event_queue[i].data.ptr;
 
                                 assert(s);
 
                                 switch (s->type) {
 
                                 case SOURCE_IO:
-                                        r = process_io(e, s, ev_queue[i].events);
+                                        r = process_io(e, s, e->event_queue[i].events);
                                         break;
 
                                 case SOURCE_CHILD:
-                                        r = process_pidfd(e, s, ev_queue[i].events);
+                                        r = process_pidfd(e, s, e->event_queue[i].events);
                                         break;
 
                                 default:
@@ -3545,20 +3550,20 @@ _public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
                         }
 
                         case WAKEUP_CLOCK_DATA: {
-                                struct clock_data *d = ev_queue[i].data.ptr;
+                                struct clock_data *d = e->event_queue[i].data.ptr;
 
                                 assert(d);
 
-                                r = flush_timer(e, d->fd, ev_queue[i].events, &d->next);
+                                r = flush_timer(e, d->fd, e->event_queue[i].events, &d->next);
                                 break;
                         }
 
                         case WAKEUP_SIGNAL_DATA:
-                                r = process_signal(e, ev_queue[i].data.ptr, ev_queue[i].events);
+                                r = process_signal(e, e->event_queue[i].data.ptr, e->event_queue[i].events);
                                 break;
 
                         case WAKEUP_INOTIFY_DATA:
-                                r = event_inotify_data_read(e, ev_queue[i].data.ptr, ev_queue[i].events);
+                                r = event_inotify_data_read(e, e->event_queue[i].data.ptr, e->event_queue[i].events);
                                 break;
 
                         default: