Mirror of https://github.com/systemd/systemd-stable.git (synced 2025-01-11 05:17:44 +03:00)
Merge pull request #22998 from DaanDeMeyer/journal-compact-split
journal: Add compact mode
Commit b25e08a752
@@ -468,3 +468,10 @@ SYSTEMD_HOME_DEBUG_SUFFIX=foo \
when kernel-install is invoked. This can be useful if kernel-install is invoked
unconditionally as a child process by another tool, such as package managers
running kernel-install in a postinstall script.

`systemd-journald`:

* `$SYSTEMD_JOURNAL_COMPACT` - Takes a boolean. If enabled, journal files are written
  in a more compact format that reduces the amount of disk space required by the
  journal. Note that journal files in compact mode are limited to 4G to allow use of
  32-bit offsets. Enabled by default.

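To make the effect of this switch concrete, here is a minimal sketch of how the boolean is consumed (simplified: the real helper added by this commit, `compact_mode_requested()`, visible further below, uses systemd's `getenv_bool()` and then folds the result into the `HEADER_INCOMPATIBLE_COMPACT` header flag):

```
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* Simplified stand-in for the real helper: an explicit boolean in
 * $SYSTEMD_JOURNAL_COMPACT wins, anything else falls back to the default
 * (compact mode enabled). */
static bool compact_mode_requested(void) {
        const char *e = getenv("SYSTEMD_JOURNAL_COMPACT");

        if (e) {
                if (strcmp(e, "1") == 0 || strcasecmp(e, "yes") == 0 || strcasecmp(e, "true") == 0)
                        return true;
                if (strcmp(e, "0") == 0 || strcasecmp(e, "no") == 0 || strcasecmp(e, "false") == 0)
                        return false;
        }

        return true; /* enabled by default */
}
```
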
@@ -71,7 +71,7 @@ thread](https://lists.freedesktop.org/archives/systemd-devel/2012-October/007054

## Basics

* All offsets, sizes, time values, hashes (and most other numeric values) are 64bit unsigned integers in LE format.
* All offsets, sizes, time values, hashes (and most other numeric values) are 32bit/64bit unsigned integers in LE format.
* Offsets are always relative to the beginning of the file.
* The 64bit hash function siphash24 is used for newer journal files. For older files [Jenkins lookup3](https://en.wikipedia.org/wiki/Jenkins_hash_function) is used, more specifically `jenkins_hashlittle2()` with the first 32bit integer it returns used as the higher 32bit part of the 64bit value, and the second one used as the lower 32bit part (see the sketch after this list).
* All structures are aligned to 64bit boundaries and padded to multiples of 64bit

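As a concrete reading of the lookup3 rule above, the legacy 64bit hash is just the two 32bit outputs of `jenkins_hashlittle2()` glued together. A sketch, assuming the classic lookup3 prototype and that its implementation is linked in:

```
#include <stddef.h>
#include <stdint.h>

/* Classic lookup3 entry point (Bob Jenkins); systemd ships a copy of it. */
void jenkins_hashlittle2(const void *key, size_t length, uint32_t *pc, uint32_t *pb);

/* Legacy (pre keyed-hash) 64bit journal hash: the first 32bit result becomes
 * the high half, the second the low half, as described above. */
static uint64_t jenkins_hash64(const void *data, size_t size) {
        uint32_t a = 0, b = 0;

        jenkins_hashlittle2(data, size, &a, &b);

        return ((uint64_t) a << 32) | (uint64_t) b;
}
```
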
@@ -177,6 +177,9 @@ _packed_ struct Header {
        /* Added in 246 */
        le64_t data_hash_chain_depth;
        le64_t field_hash_chain_depth;
        /* Added in 252 */
        le32_t tail_entry_array_offset;
        le32_t tail_entry_array_n_entries;
};
```

@@ -231,6 +234,8 @@ became too frequent.
Similarly, **field_hash_chain_depth** is a counter of the deepest chain in the
field hash table, minus one.

**tail_entry_array_offset** and **tail_entry_array_n_entries** allow immediate
access to the last entry array in the global entry array chain.

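One way to picture what these two fields buy: when appending, the writer can jump straight to the last array in the global chain instead of following **next_entry_array_offset** links from the head. A hedged sketch (it condenses the `link_entry_into_array()` change visible later in this commit; `JOURNAL_HEADER_CONTAINS()` is systemd's existing check for whether a header is large enough to carry a field, and the helper name is hypothetical):

```
/* Sketch; JournalFile and the header fields are the systemd definitions from
 * this commit, not re-declared here. */
static uint64_t append_start_array(const JournalFile *f) {
        if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_offset) &&
            le32toh(f->header->tail_entry_array_offset) != 0)
                return le32toh(f->header->tail_entry_array_offset); /* jump to the tail */

        return le64toh(f->header->entry_array_offset); /* walk from the head */
}
```
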
## Extensibility
@@ -259,6 +264,7 @@ enum {
        HEADER_INCOMPATIBLE_COMPRESSED_LZ4  = 1 << 1,
        HEADER_INCOMPATIBLE_KEYED_HASH      = 1 << 2,
        HEADER_INCOMPATIBLE_COMPRESSED_ZSTD = 1 << 3,
        HEADER_INCOMPATIBLE_COMPACT         = 1 << 4,
};

enum {
@@ -276,6 +282,9 @@ HEADER_INCOMPATIBLE_KEYED_HASH indicates that instead of the unkeyed Jenkins
hash function the keyed siphash24 hash function is used for the two hash
tables, see below.

HEADER_INCOMPATIBLE_COMPACT indicates that the journal file uses the new binary
format that uses less space on disk compared to the original format.

HEADER_COMPATIBLE_SEALED indicates that the file includes TAG objects required
for Forward Secure Sealing.
@@ -393,7 +402,16 @@ _packed_ struct DataObject {
        le64_t entry_offset; /* the first array entry we store inline */
        le64_t entry_array_offset;
        le64_t n_entries;
        uint8_t payload[];
        union {
                struct {
                        uint8_t payload[];
                } regular;
                struct {
                        le32_t tail_entry_array_offset;
                        le32_t tail_entry_array_n_entries;
                        uint8_t payload[];
                } compact;
        };
};
```

@@ -426,6 +444,9 @@ OBJECT_COMPRESSED_XZ/OBJECT_COMPRESSED_LZ4/OBJECT_COMPRESSED_ZSTD is set in the
`ObjectHeader`, in which case the payload is compressed with the indicated
compression algorithm.

If the `HEADER_INCOMPATIBLE_COMPACT` flag is set, two extra fields are stored to
allow immediate access to the tail entry array in the DATA object's entry array
chain.

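Because of this, the payload no longer starts at a fixed offset; readers have to pick it based on the header flag. A minimal sketch of the accessor this implies (the commit's real helper is `journal_file_data_payload_offset()`, used throughout the code changes below; the types and the `JOURNAL_HEADER_COMPACT()` macro are systemd's, not re-declared here, and the sketch name is hypothetical):

```
#include <stddef.h>

/* Sketch: compact DATA objects carry two extra le32 fields before the payload. */
static uint64_t data_payload_offset(const JournalFile *f) {
        return JOURNAL_HEADER_COMPACT(f->header) ?
                        offsetof(Object, data.compact.payload) :
                        offsetof(Object, data.regular.payload);
}
```
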
## Field Objects
@@ -457,11 +478,6 @@ field name. It is the head of a singly linked list using DATA's

## Entry Objects

```
_packed_ struct EntryItem {
        le64_t object_offset;
        le64_t hash;
};

_packed_ struct EntryObject {
        ObjectHeader object;
        le64_t seqnum;
@@ -469,7 +485,15 @@ _packed_ struct EntryObject {
        le64_t monotonic;
        sd_id128_t boot_id;
        le64_t xor_hash;
        EntryItem items[];
        union {
                struct {
                        le64_t object_offset;
                        le64_t hash;
                } regular[];
                struct {
                        le32_t object_offset;
                } compact[];
        } items;
};
```

@@ -495,6 +519,10 @@ The **items[]** array contains references to all DATA objects of this entry,
plus their respective hashes (which are calculated the same way as in the DATA
objects, i.e. keyed by the file ID).

If the `HEADER_INCOMPATIBLE_COMPACT` flag is set, DATA object offsets are stored
as 32-bit integers instead of 64-bit and the unused hash field per data object is
not stored anymore.

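The practical consequence is that both the stride of **items[]** and the way an item's DATA offset is read become header dependent. A rough sketch of the accessors this implies (the commit's real helpers carry names like `journal_file_entry_item_size()`, visible in the code changes below; types and `JOURNAL_HEADER_COMPACT()` are systemd's, and these sketch names are hypothetical):

```
/* Sketch: per-item stride and offset lookup for both layouts. */
static size_t entry_item_size(const JournalFile *f) {
        /* compact: a single le32 offset; regular: le64 offset + le64 hash */
        return JOURNAL_HEADER_COMPACT(f->header) ? sizeof(le32_t) : sizeof(le64_t) * 2;
}

static uint64_t entry_item_object_offset(const JournalFile *f, const Object *o, size_t i) {
        return JOURNAL_HEADER_COMPACT(f->header) ?
                        le32toh(o->entry.items.compact[i].object_offset) :
                        le64toh(o->entry.items.regular[i].object_offset);
}
```
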
In the file ENTRY objects are written ordered monotonically by sequence
number. For continuous parts of the file written during the same boot
(i.e. with the same boot_id) the monotonic timestamp is monotonic too. Modulo
@@ -548,7 +576,10 @@ creativity rather than runtime parameters.
_packed_ struct EntryArrayObject {
        ObjectHeader object;
        le64_t next_entry_array_offset;
        le64_t items[];
        union {
                le64_t regular[];
                le32_t compact[];
        } items;
};
```

@@ -556,6 +587,9 @@ Entry Arrays are used to store a sorted array of offsets to entries. Entry
arrays are strictly sorted by offsets on disk, and hence by their timestamps
and sequence numbers (with some restrictions, see above).

If the `HEADER_INCOMPATIBLE_COMPACT` flag is set, offsets are stored as 32-bit
integers instead of 64-bit.

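The same pattern applies to entry arrays: each slot shrinks from eight to four bytes and reads go through a small header-dependent accessor. A sketch (the commit's real helpers are `journal_file_entry_array_item_size()` and `journal_file_entry_array_item()`, both visible in the code changes below; types and `JOURNAL_HEADER_COMPACT()` are systemd's):

```
/* Sketch: slot width and slot read for both entry array layouts. */
static size_t entry_array_item_size(const JournalFile *f) {
        return JOURNAL_HEADER_COMPACT(f->header) ? sizeof(le32_t) : sizeof(le64_t);
}

static uint64_t entry_array_item(const JournalFile *f, const Object *o, size_t i) {
        return JOURNAL_HEADER_COMPACT(f->header) ?
                        le32toh(o->entry_array.items.compact[i]) :
                        le64toh(o->entry_array.items.regular[i]);
}
```
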
Entry Arrays are chained up. If one entry array is full another one is
allocated and the **next_entry_array_offset** field of the old one is set to
point to it. An Entry Array with **next_entry_array_offset** set to 0 is the last in the
@@ -854,6 +854,17 @@
        cryptographic theory it is based on.</para></listitem>
      </varlistentry>

      <varlistentry>
        <term><option>--convert=</option></term>

        <listitem><para>Converts the specified journal files to the latest supported journal format. Takes
        the path to store the converted journal files. The path should include the filename to be used for
        the converted files, with the <literal>.journal</literal> extension (e.g.
        <filename>/a/b/c/converted.journal</filename> will store the journal files in the
        <filename>/a/b/c</filename> directory using <filename>converted.journal</filename> as the filename).
        </para></listitem>
      </varlistentry>

      <xi:include href="standard-options.xml" xpointer="help" />
      <xi:include href="standard-options.xml" xpointer="version" />
    </variablelist>
@@ -2293,11 +2293,13 @@ public_programs += executable(
        install : true)

if get_option('link-journalctl-shared')
        journalctl_link_with = [libshared]
        journalctl_link_with = [libshared,
                                libjournal_core]
else
        journalctl_link_with = [libsystemd_static,
                                libshared_static,
                                libbasic_gcrypt]
                                libbasic_gcrypt,
                                libjournal_core]
endif

public_programs += executable(
@@ -44,6 +44,7 @@
#include "locale-util.h"
#include "log.h"
#include "logs-show.h"
#include "managed-journal-file.h"
#include "memory-util.h"
#include "mkdir.h"
#include "mount-util.h"
@@ -128,6 +129,7 @@ static uint64_t arg_vacuum_size = 0;
static uint64_t arg_vacuum_n_files = 0;
static usec_t arg_vacuum_time = 0;
static char **arg_output_fields = NULL;
static const char *arg_convert = NULL;
static const char *arg_pattern = NULL;
static pcre2_code *arg_compiled_pattern = NULL;
static PatternCompileCase arg_case = PATTERN_COMPILE_CASE_AUTO;
@@ -162,6 +164,7 @@ static enum {
        ACTION_ROTATE_AND_VACUUM,
        ACTION_LIST_FIELDS,
        ACTION_LIST_FIELD_NAMES,
        ACTION_CONVERT,
} arg_action = ACTION_SHOW;

typedef struct BootId {
@@ -387,6 +390,7 @@ static int help(void) {
               "     --dump-catalog          Show entries in the message catalog\n"
               "     --update-catalog        Update the message catalog database\n"
               "     --setup-keys            Generate a new FSS key pair\n"
               "     --convert=PATH          Convert the journal to the latest journal format\n"
               "\nSee the %2$s for details.\n",
               program_invocation_short_name,
               link,
@@ -441,6 +445,7 @@ static int parse_argv(int argc, char *argv[]) {
                ARG_NO_HOSTNAME,
                ARG_OUTPUT_FIELDS,
                ARG_NAMESPACE,
                ARG_CONVERT,
        };

        static const struct option options[] = {
@@ -508,6 +513,7 @@ static int parse_argv(int argc, char *argv[]) {
                { "no-hostname",   no_argument,       NULL, ARG_NO_HOSTNAME   },
                { "output-fields", required_argument, NULL, ARG_OUTPUT_FIELDS },
                { "namespace",     required_argument, NULL, ARG_NAMESPACE     },
                { "convert",       required_argument, NULL, ARG_CONVERT       },
                {}
        };

@@ -1034,6 +1040,11 @@ static int parse_argv(int argc, char *argv[]) {
                        break;
                }

                case ARG_CONVERT:
                        arg_action = ACTION_CONVERT;
                        arg_convert = optarg;
                        break;

                case '?':
                        return -EINVAL;

@@ -2093,6 +2104,52 @@ static int wait_for_change(sd_journal *j, int poll_fd) {
        return 0;
}

static int journal_convert(sd_journal *j) {
        _cleanup_(managed_journal_file_closep) ManagedJournalFile *to = NULL;
        _cleanup_(mmap_cache_unrefp) MMapCache *mmap = NULL;
        int r;

        assert(arg_convert);

        mmap = mmap_cache_new();
        if (!mmap)
                return -ENOMEM;

        r = managed_journal_file_open(-1, arg_convert, O_RDWR | O_CREAT, JOURNAL_COMPRESS, 0640, UINT64_MAX,
                                      &(JournalMetrics) { -1, -1, -1, -1, -1, -1 }, mmap, NULL, NULL, &to);
        if (r < 0)
                return log_error_errno(r, "Failed to open journal: %m");

        SD_JOURNAL_FOREACH(j) {
                Object *o;
                JournalFile *from;

                from = j->current_file;
                assert(from && from->current_offset > 0);

                r = journal_file_move_to_object(from, OBJECT_ENTRY, from->current_offset, &o);
                if (r < 0)
                        return log_error_errno(r, "Can't read entry: %m");

                r = journal_file_copy_entry(from, to->file, o, from->current_offset);
                if (r >= 0)
                        continue;

                if (!journal_shall_try_append_again(to->file, r))
                        return log_error_errno(r, "Can't write entry: %m");

                r = managed_journal_file_rotate(&to, mmap, JOURNAL_COMPRESS, UINT64_MAX, NULL);
                if (r < 0)
                        return r;

                r = journal_file_copy_entry(from, to->file, o, from->current_offset);
                if (r < 0)
                        return log_error_errno(r, "Can't write entry: %m");
        }

        return 0;
}

int main(int argc, char *argv[]) {
        _cleanup_(loop_device_unrefp) LoopDevice *loop_device = NULL;
        _cleanup_(umount_and_rmdir_and_freep) char *unlink_dir = NULL;
@@ -2203,6 +2260,7 @@ int main(int argc, char *argv[]) {
        case ACTION_ROTATE_AND_VACUUM:
        case ACTION_LIST_FIELDS:
        case ACTION_LIST_FIELD_NAMES:
        case ACTION_CONVERT:
                /* These ones require access to the journal files, continue below. */
                break;

@@ -2357,6 +2415,10 @@ int main(int argc, char *argv[]) {
        case ACTION_LIST_FIELDS:
                break;

        case ACTION_CONVERT:
                r = journal_convert(j);
                goto finish;

        default:
                assert_not_reached();
        }

@ -30,6 +30,7 @@
|
||||
#include "io-util.h"
|
||||
#include "journal-authenticate.h"
|
||||
#include "journal-internal.h"
|
||||
#include "journal-util.h"
|
||||
#include "journal-vacuum.h"
|
||||
#include "journald-audit.h"
|
||||
#include "journald-context.h"
|
||||
@ -769,55 +770,6 @@ static void server_cache_hostname(Server *s) {
|
||||
free_and_replace(s->hostname_field, x);
|
||||
}
|
||||
|
||||
static bool shall_try_append_again(JournalFile *f, int r) {
|
||||
switch (r) {
|
||||
|
||||
case -E2BIG: /* Hit configured limit */
|
||||
case -EFBIG: /* Hit fs limit */
|
||||
case -EDQUOT: /* Quota limit hit */
|
||||
case -ENOSPC: /* Disk full */
|
||||
log_debug("%s: Allocation limit reached, rotating.", f->path);
|
||||
return true;
|
||||
|
||||
case -EIO: /* I/O error of some kind (mmap) */
|
||||
log_warning("%s: IO error, rotating.", f->path);
|
||||
return true;
|
||||
|
||||
case -EHOSTDOWN: /* Other machine */
|
||||
log_info("%s: Journal file from other machine, rotating.", f->path);
|
||||
return true;
|
||||
|
||||
case -EBUSY: /* Unclean shutdown */
|
||||
log_info("%s: Unclean shutdown, rotating.", f->path);
|
||||
return true;
|
||||
|
||||
case -EPROTONOSUPPORT: /* Unsupported feature */
|
||||
log_info("%s: Unsupported feature, rotating.", f->path);
|
||||
return true;
|
||||
|
||||
case -EBADMSG: /* Corrupted */
|
||||
case -ENODATA: /* Truncated */
|
||||
case -ESHUTDOWN: /* Already archived */
|
||||
log_warning("%s: Journal file corrupted, rotating.", f->path);
|
||||
return true;
|
||||
|
||||
case -EIDRM: /* Journal file has been deleted */
|
||||
log_warning("%s: Journal file has been deleted, rotating.", f->path);
|
||||
return true;
|
||||
|
||||
case -ETXTBSY: /* Journal file is from the future */
|
||||
log_warning("%s: Journal file is from the future, rotating.", f->path);
|
||||
return true;
|
||||
|
||||
case -EAFNOSUPPORT:
|
||||
log_warning("%s: underlying file system does not support memory mapping or another required file system feature.", f->path);
|
||||
return false;
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static void write_to_journal(Server *s, uid_t uid, struct iovec *iovec, size_t n, int priority) {
|
||||
bool vacuumed = false, rotate = false;
|
||||
struct dual_timestamp ts;
|
||||
@ -872,7 +824,7 @@ static void write_to_journal(Server *s, uid_t uid, struct iovec *iovec, size_t n
|
||||
return;
|
||||
}
|
||||
|
||||
if (vacuumed || !shall_try_append_again(f->file, r)) {
|
||||
if (vacuumed || !journal_shall_try_append_again(f->file, r)) {
|
||||
log_ratelimit_full_errno(LOG_ERR, r, "Failed to write entry (%zu items, %zu bytes), ignoring: %m", n, IOVEC_TOTAL_SIZE(iovec, n));
|
||||
return;
|
||||
}
|
||||
@ -1202,7 +1154,7 @@ int server_flush_to_var(Server *s, bool require_flag_file) {
|
||||
if (r >= 0)
|
||||
continue;
|
||||
|
||||
if (!shall_try_append_again(s->system_journal->file, r)) {
|
||||
if (!journal_shall_try_append_again(s->system_journal->file, r)) {
|
||||
log_error_errno(r, "Can't write entry: %m");
|
||||
goto finish;
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ static int managed_journal_file_entry_array_punch_hole(JournalFile *f, uint64_t
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
n_items += journal_file_entry_array_n_items(&o);
|
||||
n_items += journal_file_entry_array_n_items(f, &o);
|
||||
p = q;
|
||||
}
|
||||
|
||||
@ -67,7 +67,7 @@ static int managed_journal_file_entry_array_punch_hole(JournalFile *f, uint64_t
|
||||
return 0;
|
||||
|
||||
offset = p + offsetof(Object, entry_array.items) +
|
||||
(journal_file_entry_array_n_items(&o) - n_unused) * sizeof(le64_t);
|
||||
(journal_file_entry_array_n_items(f, &o) - n_unused) * journal_file_entry_array_item_size(f);
|
||||
sz = p + le64toh(o.object.size) - offset;
|
||||
|
||||
if (sz < MINIMUM_HOLE_SIZE)
|
||||
|
@ -13,7 +13,7 @@
|
||||
#include "path-util.h"
|
||||
#include "string-util.h"
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
static void test_journal_flush(int argc, char *argv[]) {
|
||||
_cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
|
||||
_cleanup_free_ char *fn = NULL;
|
||||
char dn[] = "/var/tmp/test-journal-flush.XXXXXX";
|
||||
@ -70,6 +70,14 @@ int main(int argc, char *argv[]) {
|
||||
|
||||
unlink(fn);
|
||||
assert_se(rmdir(dn) == 0);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
|
||||
test_journal_flush(argc, argv);
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
|
||||
test_journal_flush(argc, argv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -210,7 +210,7 @@ TEST(skip) {
|
||||
test_skip_one(setup_interleaved);
|
||||
}
|
||||
|
||||
TEST(sequence_numbers) {
|
||||
static void test_sequence_numbers_one(void) {
|
||||
_cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
|
||||
char t[] = "/var/tmp/journal-seq-XXXXXX";
|
||||
ManagedJournalFile *one, *two;
|
||||
@ -295,6 +295,14 @@ TEST(sequence_numbers) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(sequence_numbers) {
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
|
||||
test_sequence_numbers_one();
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
|
||||
test_sequence_numbers_one();
|
||||
}
|
||||
|
||||
static int intro(void) {
|
||||
/* managed_journal_file_open requires a valid machine id */
|
||||
if (access("/etc/machine-id", F_OK) != 0)
|
||||
|
@ -184,12 +184,19 @@ int main(int argc, char *argv[]) {
|
||||
|
||||
test_setup_logging(LOG_DEBUG);
|
||||
|
||||
/* Run this test twice. Once with old hashing and once with new hashing */
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_KEYED_HASH", "1", 1) >= 0);
|
||||
run_test();
|
||||
/* Run this test multiple times with different configurations of features. */
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_KEYED_HASH", "0", 1) >= 0);
|
||||
run_test();
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_KEYED_HASH", "1", 1) >= 0);
|
||||
run_test();
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
|
||||
run_test();
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
|
||||
run_test();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ static int raw_verify(const char *fn, const char *verification_key) {
|
||||
return r;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
static int run_test(int argc, char *argv[]) {
|
||||
_cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
|
||||
char t[] = "/var/tmp/journal-XXXXXX";
|
||||
unsigned n;
|
||||
@ -141,3 +141,13 @@ int main(int argc, char *argv[]) {
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
|
||||
run_test(argc, argv);
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
|
||||
run_test(argc, argv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ static void mkdtemp_chdir_chattr(char *path) {
|
||||
(void) chattr_path(path, FS_NOCOW_FL, FS_NOCOW_FL, NULL);
|
||||
}
|
||||
|
||||
TEST(non_empty) {
|
||||
static void test_non_empty_one(void) {
|
||||
_cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
|
||||
dual_timestamp ts;
|
||||
ManagedJournalFile *f;
|
||||
@ -118,7 +118,15 @@ TEST(non_empty) {
|
||||
puts("------------------------------------------------------------");
|
||||
}
|
||||
|
||||
TEST(empty) {
|
||||
TEST(non_empty) {
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
|
||||
test_non_empty_one();
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
|
||||
test_non_empty_one();
|
||||
}
|
||||
|
||||
static void test_empty_one(void) {
|
||||
_cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
|
||||
ManagedJournalFile *f1, *f2, *f3, *f4;
|
||||
char t[] = "/var/tmp/journal-XXXXXX";
|
||||
@ -158,6 +166,14 @@ TEST(empty) {
|
||||
(void) managed_journal_file_close(f4);
|
||||
}
|
||||
|
||||
TEST(empty) {
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
|
||||
test_empty_one();
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
|
||||
test_empty_one();
|
||||
}
|
||||
|
||||
#if HAVE_COMPRESSION
|
||||
static bool check_compressed(uint64_t compress_threshold, uint64_t data_size) {
|
||||
_cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
|
||||
@ -222,7 +238,7 @@ static bool check_compressed(uint64_t compress_threshold, uint64_t data_size) {
|
||||
return is_compressed;
|
||||
}
|
||||
|
||||
TEST(min_compress_size) {
|
||||
static void test_min_compress_size_one(void) {
|
||||
/* Note that XZ will actually fail to compress anything under 80 bytes, so you have to choose the limits
|
||||
* carefully */
|
||||
|
||||
@ -241,6 +257,14 @@ TEST(min_compress_size) {
|
||||
assert_se(check_compressed(256, 256));
|
||||
assert_se(!check_compressed(256, 255));
|
||||
}
|
||||
|
||||
TEST(min_compress_size) {
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
|
||||
test_min_compress_size_one();
|
||||
|
||||
assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
|
||||
test_min_compress_size_one();
|
||||
}
|
||||
#endif
|
||||
|
||||
static int intro(void) {
|
||||
|
@ -248,7 +248,7 @@ int journal_file_hmac_put_object(JournalFile *f, ObjectType type, Object *o, uin
|
||||
case OBJECT_DATA:
|
||||
/* All but hash and payload are mutable */
|
||||
gcry_md_write(f->hmac, &o->data.hash, sizeof(o->data.hash));
|
||||
gcry_md_write(f->hmac, o->data.payload, le64toh(o->object.size) - offsetof(Object, data.payload));
|
||||
gcry_md_write(f->hmac, journal_file_data_payload_field(f, o), le64toh(o->object.size) - journal_file_data_payload_offset(f));
|
||||
break;
|
||||
|
||||
case OBJECT_FIELD:
|
||||
|
@ -24,7 +24,6 @@ typedef struct HashTableObject HashTableObject;
|
||||
typedef struct EntryArrayObject EntryArrayObject;
|
||||
typedef struct TagObject TagObject;
|
||||
|
||||
typedef struct EntryItem EntryItem;
|
||||
typedef struct HashItem HashItem;
|
||||
|
||||
typedef struct FSSHeader FSSHeader;
|
||||
@ -66,8 +65,17 @@ struct ObjectHeader {
|
||||
le64_t entry_offset; /* the first array entry we store inline */ \
|
||||
le64_t entry_array_offset; \
|
||||
le64_t n_entries; \
|
||||
uint8_t payload[]; \
|
||||
}
|
||||
union { \
|
||||
struct { \
|
||||
uint8_t payload[0]; \
|
||||
} regular; \
|
||||
struct { \
|
||||
le32_t tail_entry_array_offset; \
|
||||
le32_t tail_entry_array_n_entries; \
|
||||
uint8_t payload[0]; \
|
||||
} compact; \
|
||||
}; \
|
||||
}
|
||||
|
||||
struct DataObject DataObject__contents;
|
||||
struct DataObject__packed DataObject__contents _packed_;
|
||||
@ -85,20 +93,23 @@ struct FieldObject FieldObject__contents;
|
||||
struct FieldObject__packed FieldObject__contents _packed_;
|
||||
assert_cc(sizeof(struct FieldObject) == sizeof(struct FieldObject__packed));
|
||||
|
||||
struct EntryItem {
|
||||
le64_t object_offset;
|
||||
le64_t hash;
|
||||
} _packed_;
|
||||
|
||||
#define EntryObject__contents { \
|
||||
ObjectHeader object; \
|
||||
le64_t seqnum; \
|
||||
le64_t realtime; \
|
||||
le64_t monotonic; \
|
||||
sd_id128_t boot_id; \
|
||||
le64_t xor_hash; \
|
||||
EntryItem items[]; \
|
||||
}
|
||||
#define EntryObject__contents { \
|
||||
ObjectHeader object; \
|
||||
le64_t seqnum; \
|
||||
le64_t realtime; \
|
||||
le64_t monotonic; \
|
||||
sd_id128_t boot_id; \
|
||||
le64_t xor_hash; \
|
||||
union { \
|
||||
struct { \
|
||||
le64_t object_offset; \
|
||||
le64_t hash; \
|
||||
} regular[0]; \
|
||||
struct { \
|
||||
le32_t object_offset; \
|
||||
} compact[0]; \
|
||||
} items; \
|
||||
}
|
||||
|
||||
struct EntryObject EntryObject__contents;
|
||||
struct EntryObject__packed EntryObject__contents _packed_;
|
||||
@ -117,7 +128,10 @@ struct HashTableObject {
|
||||
struct EntryArrayObject {
|
||||
ObjectHeader object;
|
||||
le64_t next_entry_array_offset;
|
||||
le64_t items[];
|
||||
union {
|
||||
le64_t regular[0];
|
||||
le32_t compact[0];
|
||||
} items;
|
||||
} _packed_;
|
||||
|
||||
#define TAG_LENGTH (256/8)
|
||||
@ -152,19 +166,22 @@ enum {
|
||||
HEADER_INCOMPATIBLE_COMPRESSED_LZ4 = 1 << 1,
|
||||
HEADER_INCOMPATIBLE_KEYED_HASH = 1 << 2,
|
||||
HEADER_INCOMPATIBLE_COMPRESSED_ZSTD = 1 << 3,
|
||||
HEADER_INCOMPATIBLE_COMPACT = 1 << 4,
|
||||
};
|
||||
|
||||
#define HEADER_INCOMPATIBLE_ANY \
|
||||
(HEADER_INCOMPATIBLE_COMPRESSED_XZ | \
|
||||
HEADER_INCOMPATIBLE_COMPRESSED_LZ4 | \
|
||||
HEADER_INCOMPATIBLE_KEYED_HASH | \
|
||||
HEADER_INCOMPATIBLE_COMPRESSED_ZSTD)
|
||||
HEADER_INCOMPATIBLE_COMPRESSED_ZSTD | \
|
||||
HEADER_INCOMPATIBLE_COMPACT)
|
||||
|
||||
#define HEADER_INCOMPATIBLE_SUPPORTED \
|
||||
((HAVE_XZ ? HEADER_INCOMPATIBLE_COMPRESSED_XZ : 0) | \
|
||||
(HAVE_LZ4 ? HEADER_INCOMPATIBLE_COMPRESSED_LZ4 : 0) | \
|
||||
(HAVE_ZSTD ? HEADER_INCOMPATIBLE_COMPRESSED_ZSTD : 0) | \
|
||||
HEADER_INCOMPATIBLE_KEYED_HASH)
|
||||
HEADER_INCOMPATIBLE_KEYED_HASH | \
|
||||
HEADER_INCOMPATIBLE_COMPACT)
|
||||
|
||||
enum {
|
||||
HEADER_COMPATIBLE_SEALED = 1 << 0,
|
||||
@ -214,12 +231,15 @@ enum {
|
||||
/* Added in 246 */ \
|
||||
le64_t data_hash_chain_depth; \
|
||||
le64_t field_hash_chain_depth; \
|
||||
/* Added in 252 */ \
|
||||
le32_t tail_entry_array_offset; \
|
||||
le32_t tail_entry_array_n_entries; \
|
||||
}
|
||||
|
||||
struct Header struct_Header__contents;
|
||||
struct Header__packed struct_Header__contents _packed_;
|
||||
assert_cc(sizeof(struct Header) == sizeof(struct Header__packed));
|
||||
assert_cc(sizeof(struct Header) == 256);
|
||||
assert_cc(sizeof(struct Header) == 264);
|
||||
|
||||
#define FSS_HEADER_SIGNATURE \
|
||||
((const char[]) { 'K', 'S', 'H', 'H', 'R', 'H', 'L', 'P' })
|
||||
|
@ -44,6 +44,7 @@
|
||||
|
||||
/* This is the minimum journal file size */
|
||||
#define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
|
||||
#define JOURNAL_COMPACT_SIZE_MAX UINT32_MAX /* 4 GiB */
|
||||
|
||||
/* These are the lower and upper bounds if we deduce the max_use value
|
||||
* from the file system size */
|
||||
@ -294,24 +295,38 @@ JournalFile* journal_file_close(JournalFile *f) {
|
||||
return mfree(f);
|
||||
}
|
||||
|
||||
static bool keyed_hash_requested(void) {
|
||||
int r;
|
||||
|
||||
r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
|
||||
if (r >= 0)
|
||||
return r;
|
||||
if (r != -ENXIO)
|
||||
log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring: %m");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool compact_mode_requested(void) {
|
||||
int r;
|
||||
|
||||
r = getenv_bool("SYSTEMD_JOURNAL_COMPACT");
|
||||
if (r >= 0)
|
||||
return r;
|
||||
if (r != -ENXIO)
|
||||
log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_COMPACT environment variable, ignoring: %m");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int journal_file_init_header(JournalFile *f, JournalFileFlags file_flags, JournalFile *template) {
|
||||
Header h = {};
|
||||
ssize_t k;
|
||||
bool keyed_hash, seal = false;
|
||||
bool seal = false;
|
||||
int r;
|
||||
|
||||
assert(f);
|
||||
|
||||
/* We turn on keyed hashes by default, but provide an environment variable to turn them off, if
|
||||
* people really want that */
|
||||
r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
|
||||
if (r < 0) {
|
||||
if (r != -ENXIO)
|
||||
log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring: %m");
|
||||
keyed_hash = true;
|
||||
} else
|
||||
keyed_hash = r;
|
||||
|
||||
#if HAVE_GCRYPT
|
||||
/* Try to load the FSPRG state, and if we can't, then just don't do sealing */
|
||||
seal = FLAGS_SET(file_flags, JOURNAL_SEAL) && journal_file_fss_load(f) >= 0;
|
||||
@ -323,7 +338,8 @@ static int journal_file_init_header(JournalFile *f, JournalFileFlags file_flags,
|
||||
h.incompatible_flags |= htole32(
|
||||
FLAGS_SET(file_flags, JOURNAL_COMPRESS) *
|
||||
COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(DEFAULT_COMPRESSION) |
|
||||
keyed_hash * HEADER_INCOMPATIBLE_KEYED_HASH);
|
||||
keyed_hash_requested() * HEADER_INCOMPATIBLE_KEYED_HASH |
|
||||
compact_mode_requested() * HEADER_INCOMPATIBLE_COMPACT);
|
||||
|
||||
h.compatible_flags = htole32(seal * HEADER_COMPATIBLE_SEALED);
|
||||
|
||||
@ -387,7 +403,7 @@ static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
|
||||
f->path, type, flags & ~any);
|
||||
flags = (flags & any) & ~supported;
|
||||
if (flags) {
|
||||
const char* strv[5];
|
||||
const char* strv[6];
|
||||
size_t n = 0;
|
||||
_cleanup_free_ char *t = NULL;
|
||||
|
||||
@ -403,6 +419,8 @@ static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
|
||||
strv[n++] = "zstd-compressed";
|
||||
if (flags & HEADER_INCOMPATIBLE_KEYED_HASH)
|
||||
strv[n++] = "keyed-hash";
|
||||
if (flags & HEADER_INCOMPATIBLE_COMPACT)
|
||||
strv[n++] = "compact";
|
||||
}
|
||||
strv[n] = NULL;
|
||||
assert(n < ELEMENTSOF(strv));
|
||||
@ -568,6 +586,10 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size)
|
||||
if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
|
||||
return -E2BIG;
|
||||
|
||||
/* Refuse to go over 4G in compact mode so offsets can be stored in 32-bit. */
|
||||
if (JOURNAL_HEADER_COMPACT(f->header) && new_size > UINT32_MAX)
|
||||
return -E2BIG;
|
||||
|
||||
if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
|
||||
struct statvfs svfs;
|
||||
|
||||
@ -640,7 +662,7 @@ static int journal_file_move_to(
|
||||
return mmap_cache_fd_get(f->cache_fd, type_to_context(type), keep_always, offset, size, &f->last_stat, ret);
|
||||
}
|
||||
|
||||
static uint64_t minimum_header_size(Object *o) {
|
||||
static uint64_t minimum_header_size(JournalFile *f, Object *o) {
|
||||
|
||||
static const uint64_t table[] = {
|
||||
[OBJECT_DATA] = sizeof(DataObject),
|
||||
@ -652,15 +674,22 @@ static uint64_t minimum_header_size(Object *o) {
|
||||
[OBJECT_TAG] = sizeof(TagObject),
|
||||
};
|
||||
|
||||
assert(f);
|
||||
assert(o);
|
||||
|
||||
if (o->object.type == OBJECT_DATA)
|
||||
return journal_file_data_payload_offset(f);
|
||||
|
||||
if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
|
||||
return sizeof(ObjectHeader);
|
||||
|
||||
return table[o->object.type];
|
||||
}
|
||||
|
||||
static int check_object_header(Object *o, ObjectType type, uint64_t offset) {
|
||||
static int check_object_header(JournalFile *f, Object *o, ObjectType type, uint64_t offset) {
|
||||
uint64_t s;
|
||||
|
||||
assert(f);
|
||||
assert(o);
|
||||
|
||||
s = le64toh(READ_NOW(o->object.size));
|
||||
@ -684,7 +713,7 @@ static int check_object_header(Object *o, ObjectType type, uint64_t offset) {
|
||||
"Attempt to move to object of unexpected type: %" PRIu64,
|
||||
offset);
|
||||
|
||||
if (s < minimum_header_size(o))
|
||||
if (s < minimum_header_size(f, o))
|
||||
return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
|
||||
"Attempt to move to truncated object: %" PRIu64,
|
||||
offset);
|
||||
@ -694,7 +723,7 @@ static int check_object_header(Object *o, ObjectType type, uint64_t offset) {
|
||||
|
||||
/* Lightweight object checks. We want this to be fast, so that we won't
|
||||
* slowdown every journal_file_move_to_object() call too much. */
|
||||
static int check_object(Object *o, uint64_t offset) {
|
||||
static int check_object(JournalFile *f, Object *o, uint64_t offset) {
|
||||
assert(o);
|
||||
|
||||
switch (o->object.type) {
|
||||
@ -706,10 +735,10 @@ static int check_object(Object *o, uint64_t offset) {
|
||||
le64toh(o->data.n_entries),
|
||||
offset);
|
||||
|
||||
if (le64toh(o->object.size) <= offsetof(Object, data.payload))
|
||||
if (le64toh(o->object.size) <= journal_file_data_payload_offset(f))
|
||||
return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
|
||||
"Bad object size (<= %zu): %" PRIu64 ": %" PRIu64,
|
||||
offsetof(Object, data.payload),
|
||||
journal_file_data_payload_offset(f),
|
||||
le64toh(o->object.size),
|
||||
offset);
|
||||
|
||||
@ -749,17 +778,17 @@ static int check_object(Object *o, uint64_t offset) {
|
||||
|
||||
sz = le64toh(READ_NOW(o->object.size));
|
||||
if (sz < offsetof(Object, entry.items) ||
|
||||
(sz - offsetof(Object, entry.items)) % sizeof(EntryItem) != 0)
|
||||
(sz - offsetof(Object, entry.items)) % journal_file_entry_item_size(f) != 0)
|
||||
return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
|
||||
"Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64,
|
||||
offsetof(Object, entry.items),
|
||||
sz,
|
||||
offset);
|
||||
|
||||
if ((sz - offsetof(Object, entry.items)) / sizeof(EntryItem) <= 0)
|
||||
if ((sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f) <= 0)
|
||||
return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
|
||||
"Invalid number items in entry: %" PRIu64 ": %" PRIu64,
|
||||
(sz - offsetof(Object, entry.items)) / sizeof(EntryItem),
|
||||
(sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f),
|
||||
offset);
|
||||
|
||||
if (le64toh(o->entry.seqnum) <= 0)
|
||||
@ -805,8 +834,8 @@ static int check_object(Object *o, uint64_t offset) {
|
||||
|
||||
sz = le64toh(READ_NOW(o->object.size));
|
||||
if (sz < offsetof(Object, entry_array.items) ||
|
||||
(sz - offsetof(Object, entry_array.items)) % sizeof(le64_t) != 0 ||
|
||||
(sz - offsetof(Object, entry_array.items)) / sizeof(le64_t) <= 0)
|
||||
(sz - offsetof(Object, entry_array.items)) % journal_file_entry_array_item_size(f) != 0 ||
|
||||
(sz - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f) <= 0)
|
||||
return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
|
||||
"Invalid object entry array size: %" PRIu64 ": %" PRIu64,
|
||||
sz,
|
||||
@ -861,7 +890,7 @@ int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = check_object_header(o, type, offset);
|
||||
r = check_object_header(f, o, type, offset);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
@ -869,11 +898,11 @@ int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = check_object_header(o, type, offset);
|
||||
r = check_object_header(f, o, type, offset);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = check_object(o, offset);
|
||||
r = check_object(f, o, offset);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
@ -913,16 +942,16 @@ int journal_file_read_object_header(JournalFile *f, ObjectType type, uint64_t of
|
||||
"Failed to read short object at offset: %" PRIu64,
|
||||
offset);
|
||||
|
||||
r = check_object_header(&o, type, offset);
|
||||
r = check_object_header(f, &o, type, offset);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
if ((size_t) n < minimum_header_size(&o))
|
||||
if ((size_t) n < minimum_header_size(f, &o))
|
||||
return log_debug_errno(SYNTHETIC_ERRNO(EIO),
|
||||
"Short read while reading object: %" PRIu64,
|
||||
offset);
|
||||
|
||||
r = check_object(&o, offset);
|
||||
r = check_object(f, &o, offset);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
@ -1347,7 +1376,7 @@ int journal_file_find_data_object_with_hash(
|
||||
const void *data, uint64_t size, uint64_t hash,
|
||||
Object **ret, uint64_t *ret_offset) {
|
||||
|
||||
uint64_t p, osize, h, m, depth = 0;
|
||||
uint64_t p, h, m, depth = 0;
|
||||
int r;
|
||||
|
||||
assert(f);
|
||||
@ -1363,8 +1392,6 @@ int journal_file_find_data_object_with_hash(
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
osize = offsetof(Object, data.payload) + size;
|
||||
|
||||
m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
|
||||
if (m <= 0)
|
||||
return -EBADMSG;
|
||||
@ -1373,8 +1400,9 @@ int journal_file_find_data_object_with_hash(
|
||||
p = le64toh(f->data_hash_table[h].head_hash_offset);
|
||||
|
||||
while (p > 0) {
|
||||
Compression c;
|
||||
Object *o;
|
||||
void *d;
|
||||
size_t rsize;
|
||||
|
||||
r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
|
||||
if (r < 0)
|
||||
@ -1383,42 +1411,13 @@ int journal_file_find_data_object_with_hash(
|
||||
if (le64toh(o->data.hash) != hash)
|
||||
goto next;
|
||||
|
||||
c = COMPRESSION_FROM_OBJECT(o);
|
||||
if (c < 0)
|
||||
return -EPROTONOSUPPORT;
|
||||
if (c != COMPRESSION_NONE) {
|
||||
#if HAVE_COMPRESSION
|
||||
uint64_t l;
|
||||
size_t rsize = 0;
|
||||
r = journal_file_data_payload(f, o, p, NULL, 0, 0, &d, &rsize);
|
||||
if (r < 0)
|
||||
return r;
|
||||
assert(r > 0); /* journal_file_data_payload() always returns > 0 if no field is provided. */
|
||||
|
||||
l = le64toh(READ_NOW(o->object.size));
|
||||
if (l <= offsetof(Object, data.payload))
|
||||
return -EBADMSG;
|
||||
|
||||
l -= offsetof(Object, data.payload);
|
||||
|
||||
r = decompress_blob(c, o->data.payload, l, &f->compress_buffer, &rsize, 0);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
if (rsize == size &&
|
||||
memcmp(f->compress_buffer, data, size) == 0) {
|
||||
|
||||
if (ret)
|
||||
*ret = o;
|
||||
|
||||
if (ret_offset)
|
||||
*ret_offset = p;
|
||||
|
||||
return 1;
|
||||
}
|
||||
#else
|
||||
return -EPROTONOSUPPORT;
|
||||
#endif
|
||||
} else if (le64toh(o->object.size) == osize &&
|
||||
memcmp(o->data.payload, data, size) == 0) {
|
||||
|
||||
if (ret)
|
||||
if (memcmp_nn(data, size, d, rsize) == 0) {
|
||||
if (ret)
|
||||
*ret = o;
|
||||
|
||||
if (ret_offset)
|
||||
@ -1549,15 +1548,35 @@ static int journal_file_append_field(
|
||||
return 0;
|
||||
}
|
||||
|
||||
static Compression maybe_compress_payload(JournalFile *f, uint8_t *dst, const uint8_t *src, uint64_t size, size_t *rsize) {
|
||||
Compression compression = COMPRESSION_NONE;
|
||||
|
||||
#if HAVE_COMPRESSION
|
||||
if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
|
||||
compression = compress_blob(src, size, dst, size - 1, rsize);
|
||||
if (compression > 0) {
|
||||
log_debug("Compressed data object %"PRIu64" -> %zu using %s",
|
||||
size, *rsize, compression_to_string(compression));
|
||||
} else
|
||||
/* Compression didn't work, we don't really care why, let's continue without compression */
|
||||
compression = COMPRESSION_NONE;
|
||||
}
|
||||
#endif
|
||||
|
||||
return compression;
|
||||
}
|
||||
|
||||
static int journal_file_append_data(
|
||||
JournalFile *f,
|
||||
const void *data, uint64_t size,
|
||||
Object **ret, uint64_t *ret_offset) {
|
||||
|
||||
uint64_t hash, p, fp, osize;
|
||||
uint64_t hash, p, osize;
|
||||
Object *o, *fo;
|
||||
int r, compression = 0;
|
||||
size_t rsize = 0;
|
||||
Compression c;
|
||||
const void *eq;
|
||||
int r;
|
||||
|
||||
assert(f);
|
||||
|
||||
@ -1576,32 +1595,20 @@ static int journal_file_append_data(
|
||||
if (!eq)
|
||||
return -EINVAL;
|
||||
|
||||
osize = offsetof(Object, data.payload) + size;
|
||||
osize = journal_file_data_payload_offset(f) + size;
|
||||
r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
o->data.hash = htole64(hash);
|
||||
|
||||
#if HAVE_COMPRESSION
|
||||
if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
|
||||
size_t rsize = 0;
|
||||
c = maybe_compress_payload(f, journal_file_data_payload_field(f, o), data, size, &rsize);
|
||||
|
||||
compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);
|
||||
if (compression > COMPRESSION_NONE) {
|
||||
o->object.size = htole64(offsetof(Object, data.payload) + rsize);
|
||||
o->object.flags |= COMPRESSION_TO_OBJECT_FLAG(compression);
|
||||
|
||||
log_debug("Compressed data object %"PRIu64" -> %zu using %s",
|
||||
size, rsize, compression_to_string(compression));
|
||||
} else
|
||||
/* Compression didn't work, we don't really care why, let's continue without compression */
|
||||
compression = COMPRESSION_NONE;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (compression == 0)
|
||||
memcpy_safe(o->data.payload, data, size);
|
||||
if (c != COMPRESSION_NONE) {
|
||||
o->object.size = htole64(journal_file_data_payload_offset(f) + rsize);
|
||||
o->object.flags |= COMPRESSION_TO_OBJECT_FLAG(c);
|
||||
} else
|
||||
memcpy_safe(journal_file_data_payload_field(f, o), data, size);
|
||||
|
||||
r = journal_file_link_data(f, o, p, hash);
|
||||
if (r < 0)
|
||||
@ -1619,7 +1626,7 @@ static int journal_file_append_data(
|
||||
#endif
|
||||
|
||||
/* Create field object ... */
|
||||
r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
|
||||
r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, NULL);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
@ -1636,8 +1643,110 @@ static int journal_file_append_data(
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t journal_file_entry_n_items(Object *o) {
|
||||
static int maybe_decompress_payload(
|
||||
JournalFile *f,
|
||||
uint8_t *payload,
|
||||
uint64_t size,
|
||||
Compression compression,
|
||||
const char *field,
|
||||
size_t field_length,
|
||||
size_t data_threshold,
|
||||
void **ret_data,
|
||||
size_t *ret_size) {
|
||||
|
||||
/* We can't read objects larger than 4G on a 32bit machine */
|
||||
if ((uint64_t) (size_t) size != size)
|
||||
return -E2BIG;
|
||||
|
||||
if (compression != COMPRESSION_NONE) {
|
||||
#if HAVE_COMPRESSION
|
||||
size_t rsize;
|
||||
int r;
|
||||
|
||||
if (field) {
|
||||
r = decompress_startswith(compression, payload, size, &f->compress_buffer, field,
|
||||
field_length, '=');
|
||||
if (r < 0)
|
||||
return log_debug_errno(r,
|
||||
"Cannot decompress %s object of length %" PRIu64 ": %m",
|
||||
compression_to_string(compression),
|
||||
size);
|
||||
if (r == 0) {
|
||||
*ret_data = NULL;
|
||||
*ret_size = 0;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
r = decompress_blob(compression, payload, size, &f->compress_buffer, &rsize, 0);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
if (ret_data)
|
||||
*ret_data = f->compress_buffer;
|
||||
if (ret_size)
|
||||
*ret_size = rsize;
|
||||
#else
|
||||
return -EPROTONOSUPPORT;
|
||||
#endif
|
||||
} else {
|
||||
if (field && (size < field_length + 1 || memcmp(payload, field, field_length) != 0 || payload[field_length] != '=')) {
|
||||
*ret_data = NULL;
|
||||
*ret_size = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ret_data)
|
||||
*ret_data = payload;
|
||||
if (ret_size)
|
||||
*ret_size = (size_t) size;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int journal_file_data_payload(
|
||||
JournalFile *f,
|
||||
Object *o,
|
||||
uint64_t offset,
|
||||
const char *field,
|
||||
size_t field_length,
|
||||
size_t data_threshold,
|
||||
void **ret_data,
|
||||
size_t *ret_size) {
|
||||
|
||||
uint64_t size;
|
||||
Compression c;
|
||||
int r;
|
||||
|
||||
assert(!field == (field_length == 0)); /* These must be specified together. */
|
||||
assert(ret_data);
|
||||
assert(ret_size);
|
||||
|
||||
if (!o) {
|
||||
r = journal_file_move_to_object(f, OBJECT_DATA, offset, &o);
|
||||
if (r < 0)
|
||||
return r;
|
||||
}
|
||||
|
||||
size = le64toh(READ_NOW(o->object.size));
|
||||
if (size < journal_file_data_payload_offset(f))
|
||||
return -EBADMSG;
|
||||
|
||||
size -= journal_file_data_payload_offset(f);
|
||||
|
||||
c = COMPRESSION_FROM_OBJECT(o);
|
||||
if (c < 0)
|
||||
return -EPROTONOSUPPORT;
|
||||
|
||||
return maybe_decompress_payload(f, journal_file_data_payload_field(f, o), size, c, field,
|
||||
field_length, data_threshold, ret_data, ret_size);
|
||||
}
|
||||
|
||||
uint64_t journal_file_entry_n_items(JournalFile *f, Object *o) {
|
||||
uint64_t sz;
|
||||
|
||||
assert(f);
|
||||
assert(o);
|
||||
|
||||
if (o->object.type != OBJECT_ENTRY)
|
||||
@ -1647,10 +1756,10 @@ uint64_t journal_file_entry_n_items(Object *o) {
|
||||
if (sz < offsetof(Object, entry.items))
|
||||
return 0;
|
||||
|
||||
return (sz - offsetof(Object, entry.items)) / sizeof(EntryItem);
|
||||
return (sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f);
|
||||
}
|
||||
|
||||
uint64_t journal_file_entry_array_n_items(Object *o) {
|
||||
uint64_t journal_file_entry_array_n_items(JournalFile *f, Object *o) {
|
||||
uint64_t sz;
|
||||
|
||||
assert(o);
|
||||
@ -1662,7 +1771,7 @@ uint64_t journal_file_entry_array_n_items(Object *o) {
|
||||
if (sz < offsetof(Object, entry_array.items))
|
||||
return 0;
|
||||
|
||||
return (sz - offsetof(Object, entry_array.items)) / sizeof(uint64_t);
|
||||
return (sz - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f);
|
||||
}
|
||||
|
||||
uint64_t journal_file_hash_table_n_items(Object *o) {
|
||||
@ -1680,9 +1789,22 @@ uint64_t journal_file_hash_table_n_items(Object *o) {
|
||||
return (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem);
|
||||
}
|
||||
|
||||
static void write_entry_array_item(JournalFile *f, Object *o, uint64_t i, uint64_t p) {
|
||||
assert(f);
|
||||
assert(o);
|
||||
|
||||
if (JOURNAL_HEADER_COMPACT(f->header)) {
|
||||
assert(p <= UINT32_MAX);
|
||||
o->entry_array.items.compact[i] = htole32(p);
|
||||
} else
|
||||
o->entry_array.items.regular[i] = htole64(p);
|
||||
}
|
||||
|
||||
static int link_entry_into_array(JournalFile *f,
|
||||
le64_t *first,
|
||||
le64_t *idx,
|
||||
le32_t *tail,
|
||||
le32_t *tidx,
|
||||
uint64_t p) {
|
||||
int r;
|
||||
uint64_t n = 0, ap = 0, q, i, a, hidx;
|
||||
@ -1694,18 +1816,21 @@ static int link_entry_into_array(JournalFile *f,
|
||||
assert(idx);
|
||||
assert(p > 0);
|
||||
|
||||
a = le64toh(*first);
|
||||
i = hidx = le64toh(READ_NOW(*idx));
|
||||
a = tail ? le32toh(*tail) : le64toh(*first);
|
||||
hidx = le64toh(READ_NOW(*idx));
|
||||
i = tidx ? le32toh(READ_NOW(*tidx)) : hidx;
|
||||
while (a > 0) {
|
||||
|
||||
r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
n = journal_file_entry_array_n_items(o);
|
||||
n = journal_file_entry_array_n_items(f, o);
|
||||
if (i < n) {
|
||||
o->entry_array.items[i] = htole64(p);
|
||||
write_entry_array_item(f, o, i, p);
|
||||
*idx = htole64(hidx + 1);
|
||||
if (tidx)
|
||||
*tidx = htole32(le32toh(*tidx) + 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1723,7 +1848,7 @@ static int link_entry_into_array(JournalFile *f,
|
||||
n = 4;
|
||||
|
||||
r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
|
||||
offsetof(Object, entry_array.items) + n * sizeof(uint64_t),
|
||||
offsetof(Object, entry_array.items) + n * journal_file_entry_array_item_size(f),
|
||||
&o, &q);
|
||||
if (r < 0)
|
||||
return r;
|
||||
@ -1734,7 +1859,7 @@ static int link_entry_into_array(JournalFile *f,
|
||||
return r;
|
||||
#endif
|
||||
|
||||
o->entry_array.items[i] = htole64(p);
|
||||
write_entry_array_item(f, o, i, p);
|
||||
|
||||
if (ap == 0)
|
||||
*first = htole64(q);
|
||||
@ -1746,10 +1871,15 @@ static int link_entry_into_array(JournalFile *f,
|
||||
o->entry_array.next_entry_array_offset = htole64(q);
|
||||
}
|
||||
|
||||
if (tail)
|
||||
*tail = htole32(q);
|
||||
|
||||
if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
|
||||
f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
|
||||
|
||||
*idx = htole64(hidx + 1);
|
||||
if (tidx)
|
||||
*tidx = htole32(1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1758,6 +1888,8 @@ static int link_entry_into_array_plus_one(JournalFile *f,
|
||||
le64_t *extra,
|
||||
le64_t *first,
|
||||
le64_t *idx,
|
||||
le32_t *tail,
|
||||
le32_t *tidx,
|
||||
uint64_t p) {
|
||||
|
||||
uint64_t hidx;
|
||||
@ -1778,7 +1910,7 @@ static int link_entry_into_array_plus_one(JournalFile *f,
|
||||
le64_t i;
|
||||
|
||||
i = htole64(hidx - 1);
|
||||
r = link_entry_into_array(f, first, &i, p);
|
||||
r = link_entry_into_array(f, first, &i, tail, tidx, p);
|
||||
if (r < 0)
|
||||
return r;
|
||||
}
|
||||
@ -1787,15 +1919,13 @@ static int link_entry_into_array_plus_one(JournalFile *f,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {
|
||||
uint64_t p;
|
||||
static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t p) {
|
||||
int r;
|
||||
|
||||
assert(f);
|
||||
assert(o);
|
||||
assert(offset > 0);
|
||||
|
||||
p = le64toh(o->entry.items[i].object_offset);
|
||||
r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
|
||||
if (r < 0)
|
||||
return r;
|
||||
@ -1804,11 +1934,18 @@ static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offs
|
||||
&o->data.entry_offset,
|
||||
&o->data.entry_array_offset,
|
||||
&o->data.n_entries,
|
||||
JOURNAL_HEADER_COMPACT(f->header) ? &o->data.compact.tail_entry_array_offset : NULL,
|
||||
JOURNAL_HEADER_COMPACT(f->header) ? &o->data.compact.tail_entry_array_n_entries : NULL,
|
||||
offset);
|
||||
}
|
||||
|
||||
static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
|
||||
uint64_t n;
|
||||
static int journal_file_link_entry(
|
||||
JournalFile *f,
|
||||
Object *o,
|
||||
uint64_t offset,
|
||||
const EntryItem items[],
|
||||
size_t n_items) {
|
||||
|
||||
int r;
|
||||
|
||||
assert(f);
|
||||
@ -1825,6 +1962,8 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
|
||||
r = link_entry_into_array(f,
|
||||
&f->header->entry_array_offset,
|
||||
&f->header->n_entries,
|
||||
JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_offset) ? &f->header->tail_entry_array_offset : NULL,
|
||||
JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_n_entries) ? &f->header->tail_entry_array_n_entries : NULL,
|
||||
offset);
|
||||
if (r < 0)
|
||||
return r;
|
||||
@ -1838,15 +1977,14 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
|
||||
f->header->tail_entry_monotonic = o->entry.monotonic;
|
||||
|
||||
/* Link up the items */
|
||||
n = journal_file_entry_n_items(o);
|
||||
for (uint64_t i = 0; i < n; i++) {
|
||||
for (uint64_t i = 0; i < n_items; i++) {
|
||||
int k;
|
||||
|
||||
/* If we fail to link an entry item because we can't allocate a new entry array, don't fail
|
||||
* immediately but try to link the other entry items since it might still be possible to link
|
||||
* those if they don't require a new entry array to be allocated. */
|
||||
|
||||
k = journal_file_link_entry_item(f, o, offset, i);
|
||||
k = journal_file_link_entry_item(f, o, offset, items[i].object_offset);
|
||||
if (k == -E2BIG)
|
||||
r = k;
|
||||
else if (k < 0)
|
||||
@ -1856,12 +1994,26 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
|
||||
return r;
|
||||
}
|
||||
|
||||
static void write_entry_item(JournalFile *f, Object *o, uint64_t i, const EntryItem *item) {
|
||||
assert(f);
|
||||
assert(o);
|
||||
assert(item);
|
||||
|
||||
if (JOURNAL_HEADER_COMPACT(f->header)) {
|
||||
assert(item->object_offset <= UINT32_MAX);
|
||||
o->entry.items.compact[i].object_offset = htole32(item->object_offset);
|
||||
} else {
|
||||
o->entry.items.regular[i].object_offset = htole64(item->object_offset);
|
||||
o->entry.items.regular[i].hash = htole64(item->hash);
|
||||
}
|
||||
}
|
||||
|
||||
static int journal_file_append_entry_internal(
|
||||
JournalFile *f,
|
||||
const dual_timestamp *ts,
|
||||
const sd_id128_t *boot_id,
|
||||
uint64_t xor_hash,
|
||||
const EntryItem items[], unsigned n_items,
|
||||
const EntryItem items[], size_t n_items,
|
||||
uint64_t *seqnum,
|
||||
Object **ret, uint64_t *ret_offset) {
|
||||
uint64_t np;
|
||||
@ -1874,14 +2026,13 @@ static int journal_file_append_entry_internal(
|
||||
assert(items || n_items == 0);
|
||||
assert(ts);
|
||||
|
||||
osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));
|
||||
osize = offsetof(Object, entry.items) + (n_items * journal_file_entry_item_size(f));
|
||||
|
||||
r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
|
||||
memcpy_safe(o->entry.items, items, n_items * sizeof(EntryItem));
|
||||
o->entry.realtime = htole64(ts->realtime);
|
||||
o->entry.monotonic = htole64(ts->monotonic);
|
||||
o->entry.xor_hash = htole64(xor_hash);
|
||||
@ -1889,13 +2040,16 @@ static int journal_file_append_entry_internal(
|
||||
f->header->boot_id = *boot_id;
|
||||
o->entry.boot_id = f->header->boot_id;
|
||||
|
||||
for (size_t i = 0; i < n_items; i++)
|
||||
write_entry_item(f, o, i, &items[i]);
|
||||
|
||||
#if HAVE_GCRYPT
|
||||
r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
|
||||
if (r < 0)
|
||||
return r;
|
||||
#endif
|
||||
|
||||
r = journal_file_link_entry(f, o, np);
|
||||
r = journal_file_link_entry(f, o, np, items, n_items);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
@ -1998,12 +2152,10 @@ int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t)
|
||||
}
|
||||
|
||||
static int entry_item_cmp(const EntryItem *a, const EntryItem *b) {
|
||||
return CMP(le64toh(a->object_offset), le64toh(b->object_offset));
|
||||
return CMP(a->object_offset, b->object_offset);
|
||||
}
|
||||
|
||||
static size_t remove_duplicate_entry_items(EntryItem items[], size_t n) {

        /* This function relies on the items array being sorted. */
        size_t j = 1;

        if (n <= 1)
@ -2078,8 +2230,8 @@ int journal_file_append_entry(
                xor_hash ^= le64toh(o->data.hash);

                items[i] = (EntryItem) {
                        .object_offset = htole64(p),
                        .hash = o->data.hash,
                        .object_offset = p,
                        .hash = le64toh(o->data.hash),
                };
        }

@ -2255,7 +2407,7 @@ static int generic_array_get(
                if (r < 0)
                        return r;

                k = journal_file_entry_array_n_items(o);
                k = journal_file_entry_array_n_items(f, o);
                if (i < k)
                        break;

@ -2275,7 +2427,7 @@ static int generic_array_get(
                if (r < 0)
                        return r;

                k = journal_file_entry_array_n_items(o);
                k = journal_file_entry_array_n_items(f, o);
                if (k == 0)
                        break;

@ -2283,12 +2435,12 @@ static int generic_array_get(
        }

        do {
                p = le64toh(o->entry_array.items[i]);
                p = journal_file_entry_array_item(f, o, i);

                r = journal_file_move_to_object(f, OBJECT_ENTRY, p, ret);
                if (r >= 0) {
                        /* Let's cache this item for the next invocation */
                        chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);
                        chain_cache_put(f->chain_cache, ci, first, a, journal_file_entry_array_item(f, o, 0), t, i);

                        if (ret_offset)
                                *ret_offset = p;
@ -2416,13 +2568,13 @@ static int generic_array_bisect(
                if (r < 0)
                        return r;

                k = journal_file_entry_array_n_items(array);
                k = journal_file_entry_array_n_items(f, array);
                right = MIN(k, n);
                if (right <= 0)
                        return 0;

                i = right - 1;
                lp = p = le64toh(array->entry_array.items[i]);
                lp = p = journal_file_entry_array_item(f, array, i);
                if (p <= 0)
                        r = -EBADMSG;
                else
@ -2455,7 +2607,7 @@ static int generic_array_bisect(
                                if (last_index > 0) {
                                        uint64_t x = last_index - 1;

                                        p = le64toh(array->entry_array.items[x]);
                                        p = journal_file_entry_array_item(f, array, x);
                                        if (p <= 0)
                                                return -EBADMSG;

@ -2475,7 +2627,7 @@ static int generic_array_bisect(
                                if (last_index < right) {
                                        uint64_t y = last_index + 1;

                                        p = le64toh(array->entry_array.items[y]);
                                        p = journal_file_entry_array_item(f, array, y);
                                        if (p <= 0)
                                                return -EBADMSG;

@ -2505,7 +2657,7 @@ static int generic_array_bisect(
                        assert(left < right);
                        i = (left + right) / 2;

                        p = le64toh(array->entry_array.items[i]);
                        p = journal_file_entry_array_item(f, array, i);
                        if (p <= 0)
                                r = -EBADMSG;
                        else
@ -2553,14 +2705,14 @@ found:
                return 0;

        /* Let's cache this item for the next invocation */
        chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : UINT64_MAX) : i);
        chain_cache_put(f->chain_cache, ci, first, a, journal_file_entry_array_item(f, array, 0), t, subtract_one ? (i > 0 ? i-1 : UINT64_MAX) : i);

        if (subtract_one && i == 0)
                p = last_p;
        else if (subtract_one)
                p = le64toh(array->entry_array.items[i-1]);
                p = journal_file_entry_array_item(f, array, i - 1);
        else
                p = le64toh(array->entry_array.items[i]);
                p = journal_file_entry_array_item(f, array, i);

        if (ret) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY, p, ret);
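The hunks above route every entry-array access in `generic_array_get()` and `generic_array_bisect()` through the new `journal_file_entry_array_item()` accessor, but the bisection itself is unchanged. The following self-contained sketch only illustrates the shape of that search (last element `<=` a needle in a sorted offset array); the accessor indirection and the systemd types are left out, and the function name is made up here.

```
#include <stddef.h>
#include <stdint.h>

/* Return the index of the last element <= needle, or -1 if none exists.
 * `items` must be sorted in ascending order. */
static int64_t bisect_offsets(const uint64_t *items, size_t n, uint64_t needle) {
        size_t left = 0, right = n;

        while (left < right) {
                size_t i = (left + right) / 2;

                if (items[i] <= needle)
                        left = i + 1;   /* candidate found, keep looking further right */
                else
                        right = i;
        }

        return left == 0 ? -1 : (int64_t) (left - 1);
}
```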
@ -3229,7 +3381,7 @@ void journal_file_print_header(JournalFile *f) {
               "Sequential number ID: %s\n"
               "State: %s\n"
               "Compatible flags:%s%s\n"
               "Incompatible flags:%s%s%s%s%s\n"
               "Incompatible flags:%s%s%s%s%s%s\n"
               "Header size: %"PRIu64"\n"
               "Arena size: %"PRIu64"\n"
               "Data hash table size: %"PRIu64"\n"
@ -3256,6 +3408,7 @@ void journal_file_print_header(JournalFile *f) {
               JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
               JOURNAL_HEADER_COMPRESSED_ZSTD(f->header) ? " COMPRESSED-ZSTD" : "",
               JOURNAL_HEADER_KEYED_HASH(f->header) ? " KEYED-HASH" : "",
               JOURNAL_HEADER_COMPACT(f->header) ? " COMPACT" : "",
               (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
               le64toh(f->header->header_size),
               le64toh(f->header->arena_size),
@ -3336,7 +3489,7 @@ static int journal_file_warn_btrfs(JournalFile *f) {
        return 1;
}

static void journal_default_metrics(JournalMetrics *m, int fd) {
static void journal_default_metrics(JournalMetrics *m, int fd, bool compact) {
        struct statvfs ss;
        uint64_t fs_size = 0;

@ -3379,6 +3532,9 @@ static void journal_default_metrics(JournalMetrics *m, int fd) {
        else
                m->max_size = PAGE_ALIGN(m->max_size);

        if (compact && m->max_size > JOURNAL_COMPACT_SIZE_MAX)
                m->max_size = JOURNAL_COMPACT_SIZE_MAX;

        if (m->max_size != 0) {
                if (m->max_size < JOURNAL_FILE_SIZE_MIN)
                        m->max_size = JOURNAL_FILE_SIZE_MIN;
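The `journal_default_metrics()` hunk above caps the configured maximum file size when compact mode is active, since a compact file can only address 4G with its 32-bit offsets. A minimal sketch of that clamping idea follows; the constant name mirrors the diff, but its exact value is an assumption here.

```
#include <stdbool.h>
#include <stdint.h>

/* Assumed value: the largest file size addressable with 32-bit offsets. */
#define JOURNAL_COMPACT_SIZE_MAX ((uint64_t) UINT32_MAX)

/* Clamp a configured max_size so all offsets still fit in 32 bits when the
 * file is written in compact mode. */
static uint64_t clamp_max_size(uint64_t max_size, bool compact) {
        if (compact && max_size > JOURNAL_COMPACT_SIZE_MAX)
                return JOURNAL_COMPACT_SIZE_MAX;

        return max_size;
}
```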
@ -3570,7 +3726,7 @@ int journal_file_open(

        if (journal_file_writable(f)) {
                if (metrics) {
                        journal_default_metrics(metrics, f->fd);
                        journal_default_metrics(metrics, f->fd, JOURNAL_HEADER_COMPACT(f->header));
                        f->metrics = *metrics;
                } else if (template)
                        f->metrics = template->metrics;
@ -3748,55 +3904,24 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6
        };
        boot_id = &o->entry.boot_id;

        n = journal_file_entry_n_items(o);
        n = journal_file_entry_n_items(from, o);
        items = newa(EntryItem, n);

        for (uint64_t i = 0; i < n; i++) {
                Compression c;
                uint64_t l, h;
                size_t t;
                uint64_t h;
                void *data;
                size_t l;
                Object *u;

                q = le64toh(o->entry.items[i].object_offset);

                r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
                q = journal_file_entry_item_object_offset(from, o, i);
                r = journal_file_data_payload(from, NULL, q, NULL, 0, 0, &data, &l);
                if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
                        log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", i);
                        continue;
                }
                if (r < 0)
                        return r;

                l = le64toh(READ_NOW(o->object.size));
                if (l < offsetof(Object, data.payload))
                        return -EBADMSG;

                l -= offsetof(Object, data.payload);
                t = (size_t) l;

                /* We hit the limit on 32bit machines */
                if ((uint64_t) t != l)
                        return -E2BIG;

                c = COMPRESSION_FROM_OBJECT(o);
                if (c < 0)
                        return -EPROTONOSUPPORT;
                if (c != COMPRESSION_NONE) {
#if HAVE_COMPRESSION
                        size_t rsize = 0;

                        r = decompress_blob(
                                        c,
                                        o->data.payload, l,
                                        &from->compress_buffer, &rsize,
                                        0);
                        if (r < 0)
                                return r;

                        data = from->compress_buffer;
                        l = rsize;
#else
                        return -EPROTONOSUPPORT;
#endif
                } else
                        data = o->data.payload;
                assert(r > 0);

                if (l == 0)
                        return -EBADMSG;
@ -3811,13 +3936,9 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6
                xor_hash ^= le64toh(u->data.hash);

                items[i] = (EntryItem) {
                        .object_offset = htole64(h),
                        .hash = u->data.hash,
                        .object_offset = h,
                        .hash = le64toh(u->data.hash),
                };

                r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
                if (r < 0)
                        return r;
        }

        r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n, NULL, NULL, NULL);
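Alongside the switch to `journal_file_data_payload()`, the hunks above change `EntryItem` so that offsets and hashes are kept in host byte order while an entry is assembled, with the endianness conversion deferred to serialization. The sketch below only illustrates that idea; the disk-side struct and both function names are invented for the example and are not the systemd ones.

```
#include <endian.h>   /* htole64(), as provided by glibc */
#include <stdint.h>

/* In-memory item: host byte order while an entry is being assembled. */
typedef struct {
        uint64_t object_offset;
        uint64_t hash;
} ExampleEntryItem;

/* On-disk item (regular, 64-bit layout): little-endian fields. */
typedef struct {
        uint64_t object_offset;
        uint64_t hash;
} ExampleDiskEntryItem;

/* Convert only at the point where the item is written out. */
static ExampleDiskEntryItem entry_item_to_disk(ExampleEntryItem item) {
        return (ExampleDiskEntryItem) {
                .object_offset = htole64(item.object_offset),
                .hash = htole64(item.hash),
        };
}
```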
@ -127,6 +127,11 @@ typedef enum JournalFileFlags {
        JOURNAL_SEAL = 1 << 1,
} JournalFileFlags;

typedef struct {
        uint64_t object_offset;
        uint64_t hash;
} EntryItem;

int journal_file_open(
                int fd,
                const char *fname,
@ -184,14 +189,64 @@ static inline bool VALID_EPOCH(uint64_t u) {
#define JOURNAL_HEADER_KEYED_HASH(h) \
        FLAGS_SET(le32toh((h)->incompatible_flags), HEADER_INCOMPATIBLE_KEYED_HASH)

#define JOURNAL_HEADER_COMPACT(h) \
        FLAGS_SET(le32toh((h)->incompatible_flags), HEADER_INCOMPATIBLE_COMPACT)

int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret);
int journal_file_read_object_header(JournalFile *f, ObjectType type, uint64_t offset, Object *ret);

int journal_file_tail_end_by_pread(JournalFile *f, uint64_t *ret_offset);
int journal_file_tail_end_by_mmap(JournalFile *f, uint64_t *ret_offset);

uint64_t journal_file_entry_n_items(Object *o) _pure_;
uint64_t journal_file_entry_array_n_items(Object *o) _pure_;
static inline uint64_t journal_file_entry_item_object_offset(JournalFile *f, Object *o, size_t i) {
        assert(f);
        assert(o);
        return JOURNAL_HEADER_COMPACT(f->header) ? le32toh(o->entry.items.compact[i].object_offset) :
                                                   le64toh(o->entry.items.regular[i].object_offset);
}

static inline size_t journal_file_entry_item_size(JournalFile *f) {
        assert(f);
        return JOURNAL_HEADER_COMPACT(f->header) ? sizeof_field(Object, entry.items.compact[0]) :
                                                   sizeof_field(Object, entry.items.regular[0]);
}

uint64_t journal_file_entry_n_items(JournalFile *f, Object *o) _pure_;

int journal_file_data_payload(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                const char *field,
                size_t field_length,
                size_t data_threshold,
                void **ret_data,
                size_t *ret_size);

static inline size_t journal_file_data_payload_offset(JournalFile *f) {
        return JOURNAL_HEADER_COMPACT(f->header)
                        ? offsetof(Object, data.compact.payload)
                        : offsetof(Object, data.regular.payload);
}

static inline uint8_t* journal_file_data_payload_field(JournalFile *f, Object *o) {
        return JOURNAL_HEADER_COMPACT(f->header) ? o->data.compact.payload : o->data.regular.payload;
}

uint64_t journal_file_entry_array_n_items(JournalFile *f, Object *o) _pure_;

static inline uint64_t journal_file_entry_array_item(JournalFile *f, Object *o, size_t i) {
        assert(f);
        assert(o);
        return JOURNAL_HEADER_COMPACT(f->header) ? le32toh(o->entry_array.items.compact[i]) :
                                                   le64toh(o->entry_array.items.regular[i]);
}

static inline size_t journal_file_entry_array_item_size(JournalFile *f) {
        assert(f);
        return JOURNAL_HEADER_COMPACT(f->header) ? sizeof(le32_t) : sizeof(le64_t);
}

uint64_t journal_file_hash_table_n_items(Object *o) _pure_;

int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *ret_offset);
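The inline helpers added to journal-file.h above all perform the same dispatch: read a 32-bit little-endian slot when the file carries `HEADER_INCOMPATIBLE_COMPACT`, a 64-bit slot otherwise. A self-contained sketch of that dispatch, with invented type and function names, is shown below; halving the item width is where most of the entry-array disk savings in compact mode come from.

```
#include <endian.h>   /* le32toh()/le64toh(), as provided by glibc */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct example_entry_array {
        bool compact;                        /* mirrors JOURNAL_HEADER_COMPACT(f) */
        union {
                uint32_t compact_items[8];   /* le32_t slots on disk */
                uint64_t regular_items[4];   /* le64_t slots on disk */
        } items;
};

/* Return the i-th entry offset in host byte order; items are 4 bytes wide in
 * compact mode instead of 8. */
static uint64_t example_entry_array_item(const struct example_entry_array *a, size_t i) {
        return a->compact ? le32toh(a->items.compact_items[i])
                          : le64toh(a->items.regular_items[i]);
}
```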
@ -170,16 +170,16 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o
                        return -EBADMSG;
                }

                if (le64toh(o->object.size) - offsetof(Object, data.payload) <= 0) {
                if (le64toh(o->object.size) - journal_file_data_payload_offset(f) <= 0) {
                        error(offset, "Bad object size (<= %zu): %"PRIu64,
                              offsetof(Object, data.payload),
                              journal_file_data_payload_offset(f),
                              le64toh(o->object.size));
                        return -EBADMSG;
                }

                h1 = le64toh(o->data.hash);
                r = hash_payload(f, o, offset, o->data.payload,
                                 le64toh(o->object.size) - offsetof(Object, data.payload),
                r = hash_payload(f, o, offset, journal_file_data_payload_field(f, o),
                                 le64toh(o->object.size) - journal_file_data_payload_offset(f),
                                 &h2);
                if (r < 0)
                        return r;
@ -240,7 +240,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o
        }

        case OBJECT_ENTRY:
                if ((le64toh(o->object.size) - offsetof(Object, entry.items)) % sizeof(EntryItem) != 0) {
                if ((le64toh(o->object.size) - offsetof(Object, entry.items)) % journal_file_entry_item_size(f) != 0) {
                        error(offset,
                              "Bad entry size (<= %zu): %"PRIu64,
                              offsetof(Object, entry.items),
@ -248,10 +248,10 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o
                        return -EBADMSG;
                }

                if ((le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem) <= 0) {
                if ((le64toh(o->object.size) - offsetof(Object, entry.items)) / journal_file_entry_item_size(f) <= 0) {
                        error(offset,
                              "Invalid number items in entry: %"PRIu64,
                              (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem));
                              (le64toh(o->object.size) - offsetof(Object, entry.items)) / journal_file_entry_item_size(f));
                        return -EBADMSG;
                }

@ -276,13 +276,13 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o
                        return -EBADMSG;
                }

                for (uint64_t i = 0; i < journal_file_entry_n_items(o); i++) {
                        if (le64toh(o->entry.items[i].object_offset) == 0 ||
                            !VALID64(le64toh(o->entry.items[i].object_offset))) {
                for (uint64_t i = 0; i < journal_file_entry_n_items(f, o); i++) {
                        if (journal_file_entry_item_object_offset(f, o, i) == 0 ||
                            !VALID64(journal_file_entry_item_object_offset(f, o, i))) {
                                error(offset,
                                      "Invalid entry item (%"PRIu64"/%"PRIu64") offset: "OFSfmt,
                                      i, journal_file_entry_n_items(o),
                                      le64toh(o->entry.items[i].object_offset));
                                      i, journal_file_entry_n_items(f, o),
                                      journal_file_entry_item_object_offset(f, o, i));
                                return -EBADMSG;
                        }
                }
@ -335,8 +335,8 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o
                break;

        case OBJECT_ENTRY_ARRAY:
                if ((le64toh(o->object.size) - offsetof(Object, entry_array.items)) % sizeof(le64_t) != 0 ||
                    (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(le64_t) <= 0) {
                if ((le64toh(o->object.size) - offsetof(Object, entry_array.items)) % journal_file_entry_array_item_size(f) != 0 ||
                    (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f) <= 0) {
                        error(offset,
                              "Invalid object entry array size: %"PRIu64,
                              le64toh(o->object.size));
@ -350,15 +350,15 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o
                        return -EBADMSG;
                }

                for (uint64_t i = 0; i < journal_file_entry_array_n_items(o); i++)
                        if (le64toh(o->entry_array.items[i]) != 0 &&
                            !VALID64(le64toh(o->entry_array.items[i]))) {
                for (uint64_t i = 0; i < journal_file_entry_array_n_items(f, o); i++) {
                        uint64_t q = journal_file_entry_array_item(f, o, i);
                        if (q != 0 && !VALID64(q)) {
                                error(offset,
                                      "Invalid object entry array item (%"PRIu64"/%"PRIu64"): "OFSfmt,
                                      i, journal_file_entry_array_n_items(o),
                                      le64toh(o->entry_array.items[i]));
                                      i, journal_file_entry_array_n_items(f, o), q);
                                return -EBADMSG;
                        }
                }

                break;

@ -490,10 +490,10 @@ static int verify_data(
                        return -EBADMSG;
                }

                m = journal_file_entry_array_n_items(o);
                m = journal_file_entry_array_n_items(f, o);
                for (j = 0; i < n && j < m; i++, j++) {

                        q = le64toh(o->entry_array.items[j]);
                        q = journal_file_entry_array_item(f, o, j);
                        if (q <= last) {
                                error(p, "Data object's entry array not sorted (%"PRIu64" <= %"PRIu64")", q, last);
                                return -EBADMSG;
@ -646,12 +646,12 @@ static int verify_entry(
        assert(o);
        assert(cache_data_fd);

        n = journal_file_entry_n_items(o);
        n = journal_file_entry_n_items(f, o);
        for (i = 0; i < n; i++) {
                uint64_t q;
                Object *u;

                q = le64toh(o->entry.items[i].object_offset);
                q = journal_file_entry_item_object_offset(f, o, i);

                if (!contains_uint64(cache_data_fd, n_data, q)) {
                        error(p, "Invalid data object of entry");
@ -737,11 +737,11 @@ static int verify_entry_array(
                        return -EBADMSG;
                }

                m = journal_file_entry_array_n_items(o);
                m = journal_file_entry_array_n_items(f, o);
                for (j = 0; i < n && j < m; i++, j++) {
                        uint64_t p;

                        p = le64toh(o->entry_array.items[j]);
                        p = journal_file_entry_array_item(f, o, j);
                        if (p <= last) {
                                error(a, "Entry array not sorted at %"PRIu64" of %"PRIu64, i, n);
                                return -EBADMSG;
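In the verifier hunks above, the fixed `sizeof(EntryItem)` and `sizeof(le64_t)` checks become per-file item sizes, because the number of bytes following an entry or entry-array header must still be a non-zero, exact multiple of the item width, which shrinks to 4 bytes in compact mode. A self-contained sketch of that size check, with invented names, follows.

```
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Reject empty entry arrays and any payload size that would leave a partial
 * item; the item width depends on whether the file is in compact mode. */
static bool entry_array_payload_size_ok(uint64_t payload_size, bool compact) {
        size_t item_size = compact ? sizeof(uint32_t) : sizeof(uint64_t);

        return payload_size >= item_size && payload_size % item_size == 0;
}
```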
@ -2287,15 +2287,16 @@ _public_ int sd_journal_get_data(sd_journal *j, const char *field, const void **

        field_length = strlen(field);

        uint64_t n = journal_file_entry_n_items(o);
        uint64_t n = journal_file_entry_n_items(f, o);
        for (uint64_t i = 0; i < n; i++) {
                Object *d;
                uint64_t p, l;
                size_t t;
                Compression c;
                uint64_t p;
                void *d;
                size_t l;

                p = le64toh(o->entry.items[i].object_offset);
                r = journal_file_move_to_object(f, OBJECT_DATA, p, &d);
                p = journal_file_entry_item_object_offset(f, o, i);
                r = journal_file_data_payload(f, NULL, p, field, field_length, j->data_threshold, &d, &l);
                if (r == 0)
                        continue;
                if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
                        log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", i);
                        continue;
@ -2303,117 +2304,15 @@ _public_ int sd_journal_get_data(sd_journal *j, const char *field, const void **
                if (r < 0)
                        return r;

                l = le64toh(d->object.size) - offsetof(Object, data.payload);
                *data = d;
                *size = l;

                c = COMPRESSION_FROM_OBJECT(d);
                if (c < 0)
                        return -EPROTONOSUPPORT;
                if (c != COMPRESSION_NONE) {
#if HAVE_COMPRESSION
                        r = decompress_startswith(
                                        c,
                                        d->data.payload, l,
                                        &f->compress_buffer,
                                        field, field_length, '=');
                        if (r < 0)
                                log_debug_errno(r, "Cannot decompress %s object of length %"PRIu64" at offset "OFSfmt": %m",
                                                compression_to_string(c), l, p);
                        else if (r > 0) {

                                size_t rsize;

                                r = decompress_blob(
                                                c,
                                                d->data.payload, l,
                                                &f->compress_buffer, &rsize,
                                                j->data_threshold);
                                if (r < 0)
                                        return r;

                                *data = f->compress_buffer;
                                *size = (size_t) rsize;

                                return 0;
                        }
#else
                        return -EPROTONOSUPPORT;
#endif
                } else if (l >= field_length+1 &&
                           memcmp(d->data.payload, field, field_length) == 0 &&
                           d->data.payload[field_length] == '=') {

                        t = (size_t) l;

                        if ((uint64_t) t != l)
                                return -E2BIG;

                        *data = d->data.payload;
                        *size = t;

                        return 0;
                }
                return 0;
        }

        return -ENOENT;
}

static int return_data(
                sd_journal *j,
                JournalFile *f,
                Object *o,
                const void **ret_data,
                size_t *ret_size) {

        Compression c;
        uint64_t l;
        size_t t;

        assert(j);
        assert(f);

        l = le64toh(READ_NOW(o->object.size));
        if (l < offsetof(Object, data.payload))
                return -EBADMSG;
        l -= offsetof(Object, data.payload);

        /* We can't read objects larger than 4G on a 32bit machine */
        t = (size_t) l;
        if ((uint64_t) t != l)
                return -E2BIG;

        c = COMPRESSION_FROM_OBJECT(o);
        if (c < 0)
                return -EPROTONOSUPPORT;
        if (c != COMPRESSION_NONE) {
#if HAVE_COMPRESSION
                size_t rsize;
                int r;

                r = decompress_blob(
                                c,
                                o->data.payload, l,
                                &f->compress_buffer, &rsize,
                                j->data_threshold);
                if (r < 0)
                        return r;

                if (ret_data)
                        *ret_data = f->compress_buffer;
                if (ret_size)
                        *ret_size = (size_t) rsize;
#else
                return -EPROTONOSUPPORT;
#endif
        } else {
                if (ret_data)
                        *ret_data = o->data.payload;
                if (ret_size)
                        *ret_size = t;
        }

        return 0;
}

_public_ int sd_journal_enumerate_data(sd_journal *j, const void **data, size_t *size) {
        JournalFile *f;
        Object *o;
@ -2435,25 +2334,23 @@ _public_ int sd_journal_enumerate_data(sd_journal *j, const void **data, size_t
        if (r < 0)
                return r;

        for (uint64_t n = journal_file_entry_n_items(o); j->current_field < n; j->current_field++) {
        for (uint64_t n = journal_file_entry_n_items(f, o); j->current_field < n; j->current_field++) {
                uint64_t p;
                void *d;
                size_t l;

                p = le64toh(o->entry.items[j->current_field].object_offset);
                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
                p = journal_file_entry_item_object_offset(f, o, j->current_field);
                r = journal_file_data_payload(f, NULL, p, NULL, 0, j->data_threshold, &d, &l);
                if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
                        log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", j->current_field);
                        continue;
                }
                if (r < 0)
                        return r;
                assert(r > 0);

                r = return_data(j, f, o, data, size);
                if (r == -EBADMSG) {
                        log_debug("Entry item %"PRIu64" data payload is bad, skipping over it.", j->current_field);
                        continue;
                }
                if (r < 0)
                        return r;
                *data = d;
                *size = l;

                j->current_field++;

@ -2925,7 +2822,7 @@ _public_ int sd_journal_enumerate_unique(
        for (;;) {
                JournalFile *of;
                Object *o;
                const void *odata;
                void *odata;
                size_t ol;
                bool found;
                int r;
@ -2969,7 +2866,8 @@ _public_ int sd_journal_enumerate_unique(
                       j->unique_offset,
                       o->object.type, OBJECT_DATA);

                r = return_data(j, j->unique_file, o, &odata, &ol);
                r = journal_file_data_payload(j->unique_file, o, j->unique_offset, NULL, 0,
                                              j->data_threshold, &odata, &ol);
                if (r < 0)
                        return r;

@ -3016,9 +2914,8 @@ _public_ int sd_journal_enumerate_unique(
                if (found)
                        continue;

                r = return_data(j, j->unique_file, o, ret_data, ret_size);
                if (r < 0)
                        return r;
                *ret_data = odata;
                *ret_size = ol;

                return 1;
        }
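The sd-journal.c hunks above replace the open-coded payload handling (and the removed `return_data()` helper) with `journal_file_data_payload()`, but none of this changes the public sd-journal API: callers read compact and regular journal files identically. A minimal consumer-side sketch using the stable libsystemd interface is shown below; build it against libsystemd (pkg-config: `libsystemd`).

```
#include <stdio.h>
#include <systemd/sd-journal.h>

int main(void) {
        sd_journal *j = NULL;
        const void *data;
        size_t length;
        int r;

        r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
        if (r < 0)
                return 1;

        SD_JOURNAL_FOREACH(j) {
                /* Works the same for compact and regular journal files. */
                if (sd_journal_get_data(j, "MESSAGE", &data, &length) >= 0)
                        printf("%.*s\n", (int) length, (const char *) data);
        }

        sd_journal_close(j);
        return 0;
}
```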
@ -136,3 +136,52 @@ int journal_access_check_and_warn(sd_journal *j, bool quiet, bool want_other_use

        return r;
}

bool journal_shall_try_append_again(JournalFile *f, int r) {
        switch (r) {

        case -E2BIG:           /* Hit configured limit */
        case -EFBIG:           /* Hit fs limit */
        case -EDQUOT:          /* Quota limit hit */
        case -ENOSPC:          /* Disk full */
                log_debug("%s: Allocation limit reached, rotating.", f->path);
                return true;

        case -EIO:             /* I/O error of some kind (mmap) */
                log_warning("%s: IO error, rotating.", f->path);
                return true;

        case -EHOSTDOWN:       /* Other machine */
                log_info("%s: Journal file from other machine, rotating.", f->path);
                return true;

        case -EBUSY:           /* Unclean shutdown */
                log_info("%s: Unclean shutdown, rotating.", f->path);
                return true;

        case -EPROTONOSUPPORT: /* Unsupported feature */
                log_info("%s: Unsupported feature, rotating.", f->path);
                return true;

        case -EBADMSG:         /* Corrupted */
        case -ENODATA:         /* Truncated */
        case -ESHUTDOWN:       /* Already archived */
                log_warning("%s: Journal file corrupted, rotating.", f->path);
                return true;

        case -EIDRM:           /* Journal file has been deleted */
                log_warning("%s: Journal file has been deleted, rotating.", f->path);
                return true;

        case -ETXTBSY:         /* Journal file is from the future */
                log_warning("%s: Journal file is from the future, rotating.", f->path);
                return true;

        case -EAFNOSUPPORT:
                log_warning("%s: underlying file system does not support memory mapping or another required file system feature.", f->path);
                return false;

        default:
                return false;
        }
}
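`journal_shall_try_append_again()` above classifies append errors into those worth a rotate-and-retry (limits reached, corruption, unsupported features such as a reader that cannot handle compact files) and those that are permanent. The sketch below shows one hypothetical way a writer could consume it; it is not the actual journald code, and `try_append()` and `rotate_journal()` are made-up stand-ins for the real write and rotation paths.

```
#include <stdbool.h>

typedef struct JournalFile JournalFile;                     /* opaque stand-in */

bool journal_shall_try_append_again(JournalFile *f, int r);  /* from journal-util.h above */
int try_append(JournalFile *f);                              /* hypothetical write helper */
int rotate_journal(JournalFile **f);                         /* hypothetical rotation helper */

static int append_with_retry(JournalFile **f) {
        int r;

        r = try_append(*f);
        if (r >= 0)
                return r;

        if (!journal_shall_try_append_again(*f, r))
                return r;                   /* permanent failure, propagate */

        r = rotate_journal(f);              /* start a fresh journal file */
        if (r < 0)
                return r;

        return try_append(*f);              /* single retry on the new file */
}
```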
@ -6,5 +6,9 @@

#include "sd-journal.h"

#include "journal-internal.h"

int journal_access_blocked(sd_journal *j);
int journal_access_check_and_warn(sd_journal *j, bool quiet, bool want_other_users);

bool journal_shall_try_append_again(JournalFile *f, int r);