mirror of
https://github.com/systemd/systemd.git
synced 2024-10-27 18:55:40 +03:00
journal: don't try to reuse already calculated hash between files with keyed hash feature
When suppressing duplicate fields between files, we previously tried to reuse the already-known hash value of the data fields across files. This was fine as long as we used the same hash function everywhere. However, since the addition of the keyed hash feature for journal files this no longer works, because the hashes will differ between files. Fixes: #19172
This commit is contained in:
parent
8d5a1082b0
commit
2e1a8a5dab
@@ -3004,7 +3004,13 @@ _public_ int sd_journal_enumerate_unique(
                 if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
                         continue;

-                r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+                /* We can reuse the hash from our current file only on old-style journal files
+                 * without keyed hashes. On new-style files we have to calculate the hash anew, to
+                 * take the per-file hash seed into consideration. */
+                if (!JOURNAL_HEADER_KEYED_HASH(j->unique_file->header) && !JOURNAL_HEADER_KEYED_HASH(of->header))
+                        r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+                else
+                        r = journal_file_find_data_object(of, odata, ol, NULL, NULL);
                 if (r < 0)
                         return r;
                 if (r > 0) {
Loading…
Reference in New Issue
Block a user