sd-journal: do not reset sd_j_enumerate_unique position on error
systemctl calls sd_j_enumerate_unique() interleaved with sd_j_next(), but the latter can remove a file if it detects an error in it. In those circumstances sd_j_enumerate_unique() would restart with the first file in the hashmap, so with many corrupted files it might iterate over the list multiple times.

Avoid this by jumping to the next file in the unique list when possible, or, when the removed file was the last one, by setting a flag that tells sd_j_enumerate_unique() that it is done.
parent f280bcfb21
commit 360af4cf6f

Notes (Lennart Poettering, 2014-10-27 13:08:17 +01:00):
    Backport: bugfix
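For context, this is how a caller walks the unique values of a journal field through the affected API. A minimal sketch using the public sd-journal interface, with error handling trimmed; the field name _SYSTEMD_UNIT is only an example:

#include <stdio.h>
#include <systemd/sd-journal.h>

int main(void) {
        sd_journal *j;
        const void *data;
        size_t l;

        if (sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY) < 0)
                return 1;

        /* Select the field whose unique values we want to enumerate. */
        if (sd_journal_query_unique(j, "_SYSTEMD_UNIT") < 0)
                return 1;

        /* Each iteration yields one "FIELD=value" payload. Before this
         * fix, removing a corrupted file mid-iteration restarted the
         * walk from the first file in the hashmap. */
        SD_JOURNAL_FOREACH_UNIQUE(j, data, l)
                printf("%.*s\n", (int) l, (const char *) data);

        sd_journal_close(j);
        return 0;
}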
@@ -124,6 +124,10 @@ struct sd_journal {
         bool on_network;
         bool no_new_files;
+        bool unique_file_lost; /* File we were iterating over got
+                                  removed, and there were no more
+                                  files, so sd_j_enumerate_unique
+                                  will return a value equal to 0. */
 
         size_t data_threshold;
@@ -1375,8 +1375,11 @@ static void remove_file_real(sd_journal *j, JournalFile *f) {
         }
 
         if (j->unique_file == f) {
-                j->unique_file = NULL;
+                /* Jump to the next unique_file or NULL if that one was last */
+                j->unique_file = hashmap_next(j->files, j->unique_file->path);
                 j->unique_offset = 0;
+                if (!j->unique_file)
+                        j->unique_file_lost = true;
         }
 
         journal_file_close(f);
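The jump relies on hashmap_next(), whose contract, as assumed here, is: given the key of the current entry, return the value stored after it in iteration order, or NULL if that entry is the last one or cannot be found. A toy illustration of that contract over a flat array, not the systemd hashmap implementation:

#include <stddef.h>
#include <string.h>

/* Hypothetical miniature key/value table with a stable iteration
 * order, purely to illustrate the next-entry lookup used above. */
struct entry {
        const char *key;
        void *value;
};

struct table {
        struct entry *entries;
        size_t n_entries;
};

/* Return the value stored after the entry with the given key, or
 * NULL if that entry is the last one or is not present -- the
 * behavior the patch expects from hashmap_next(). */
static void *table_next(struct table *t, const char *key) {
        for (size_t i = 0; i + 1 < t->n_entries; i++)
                if (strcmp(t->entries[i].key, key) == 0)
                        return t->entries[i + 1].value;
        return NULL;
}

Because the value for the next file is fetched before the current JournalFile is closed, the iterator keeps its position instead of being reset.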
@@ -2490,6 +2493,7 @@ _public_ int sd_journal_query_unique(sd_journal *j, const char *field) {
         j->unique_field = f;
         j->unique_file = NULL;
         j->unique_offset = 0;
+        j->unique_file_lost = false;
 
         return 0;
 }
@@ -2506,9 +2510,13 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_
         k = strlen(j->unique_field);
 
         if (!j->unique_file) {
+                if (j->unique_file_lost)
+                        return 0;
+
                 j->unique_file = hashmap_first(j->files);
                 if (!j->unique_file)
                         return 0;
+
                 j->unique_offset = 0;
         }
 
@@ -2538,13 +2546,10 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_
 
                 /* We reached the end of the list? Then start again, with the next file */
                 if (j->unique_offset == 0) {
-                        JournalFile *n;
-
-                        n = hashmap_next(j->files, j->unique_file->path);
-                        if (!n)
+                        j->unique_file = hashmap_next(j->files, j->unique_file->path);
+                        if (!j->unique_file)
                                 return 0;
-                        j->unique_file = n;
                         continue;
                 }
 
@@ -2632,6 +2637,7 @@ _public_ void sd_journal_restart_unique(sd_journal *j) {
 
         j->unique_file = NULL;
         j->unique_offset = 0;
+        j->unique_file_lost = false;
 }
 
 _public_ int sd_journal_reliable_fd(sd_journal *j) {
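After the iterator reports completion, including the new unique_file_lost case, a caller can rewind it explicitly; sd_journal_restart_unique() now also clears the flag, so the next walk starts from the first file again. A short sketch, assuming an open sd_journal *j and <stdio.h>:

const void *data;
size_t l;

/* Rewind the unique-value iterator; since this commit it also
 * resets unique_file_lost. */
sd_journal_restart_unique(j);

while (sd_journal_enumerate_unique(j, &data, &l) > 0)
        fwrite(data, 1, l, stdout);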