Mirror of https://github.com/systemd/systemd.git, synced 2025-01-27 18:04:05 +03:00

import: rename download code from "import" to "pull"

That way we can call the code for local container/VM imports "import" without confusion.

parent cc22955cfe
commit dc2c282b6a

Makefile.am  20
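For orientation, a minimal sketch (not part of this commit) of how a caller drives the renamed download API, using only signatures visible in this diff: pull_job_new(), pull_job_begin(), the PullJobFinished callback and pull_job_unrefp cleanup. The helper names and the surrounding setup (sd-event loop, CurlGlue allocation) are assumptions/elisions, not code from the commit.

/* Sketch only -- not part of this commit. Shows the renamed entry points
 * (pull_job_new/pull_job_begin, formerly import_job_new/import_job_begin)
 * from a caller's point of view. Event-loop and CurlGlue setup are omitted. */

#include "curl-util.h"
#include "pull-job.h"

static void my_on_finished(PullJob *j) {
        /* Invoked by pull-job.c once the job reaches PULL_JOB_DONE or PULL_JOB_FAILED. */
        log_info("Pull of %s finished, error=%i", j->url, j->error);
}

static int start_pull(CurlGlue *glue, const char *url, void *userdata) {
        _cleanup_(pull_job_unrefp) PullJob *j = NULL;
        int r;

        r = pull_job_new(&j, url, glue, userdata);      /* was: import_job_new() */
        if (r < 0)
                return r;

        j->on_finished = my_on_finished;                /* PullJobFinished, was: ImportJobFinished */

        r = pull_job_begin(j);                          /* was: import_job_begin() */
        if (r < 0)
                return r;

        j = NULL; /* in real callers the job is stored and unreffed later */
        return 0;
}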
@@ -5446,16 +5446,16 @@ systemd_importd_LDADD = \
systemd_pull_SOURCES = \
src/import/pull.c \
-src/import/import-raw.c \
-src/import/import-raw.h \
-src/import/import-tar.c \
-src/import/import-tar.h \
-src/import/import-dkr.c \
-src/import/import-dkr.h \
-src/import/import-job.c \
-src/import/import-job.h \
-src/import/import-common.c \
-src/import/import-common.h \
+src/import/pull-raw.c \
+src/import/pull-raw.h \
+src/import/pull-tar.c \
+src/import/pull-tar.h \
+src/import/pull-dkr.c \
+src/import/pull-dkr.h \
+src/import/pull-job.c \
+src/import/pull-job.h \
+src/import/pull-common.c \
+src/import/pull-common.h \
src/import/curl-util.c \
src/import/curl-util.h \
src/import/aufs-util.c \

@@ -26,12 +26,12 @@
#include "copy.h"
#include "btrfs-util.h"
#include "capability.h"
-#include "import-job.h"
-#include "import-common.h"
+#include "pull-job.h"
+#include "pull-common.h"

#define FILENAME_ESCAPE "/.#\"\'"

-int import_find_old_etags(const char *url, const char *image_root, int dt, const char *prefix, const char *suffix, char ***etags) {
+int pull_find_old_etags(const char *url, const char *image_root, int dt, const char *prefix, const char *suffix, char ***etags) {
_cleanup_free_ char *escaped_url = NULL;
_cleanup_closedir_ DIR *d = NULL;
_cleanup_strv_free_ char **l = NULL;
@@ -111,7 +111,7 @@ int import_find_old_etags(const char *url, const char *image_root, int dt, const
return 0;
}

-int import_make_local_copy(const char *final, const char *image_root, const char *local, bool force_local) {
+int pull_make_local_copy(const char *final, const char *image_root, const char *local, bool force_local) {
const char *p;
int r;

@@ -141,7 +141,7 @@ int import_make_local_copy(const char *final, const char *image_root, const char
return 0;
}

-int import_make_read_only_fd(int fd) {
+int pull_make_read_only_fd(int fd) {
int r;

assert(fd >= 0);
@@ -172,17 +172,17 @@ int import_make_read_only_fd(int fd) {
return 0;
}

-int import_make_read_only(const char *path) {
+int pull_make_read_only(const char *path) {
_cleanup_close_ int fd = -1;

fd = open(path, O_RDONLY|O_NOCTTY|O_CLOEXEC);
if (fd < 0)
return log_error_errno(errno, "Failed to open %s: %m", path);

-return import_make_read_only_fd(fd);
+return pull_make_read_only_fd(fd);
}

-int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret) {
+int pull_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret) {
_cleanup_free_ char *escaped_url = NULL;
char *path;

@@ -213,16 +213,16 @@ int import_make_path(const char *url, const char *etag, const char *image_root,
return 0;
}

-int import_make_verification_jobs(
-ImportJob **ret_checksum_job,
-ImportJob **ret_signature_job,
+int pull_make_verification_jobs(
+PullJob **ret_checksum_job,
+PullJob **ret_signature_job,
ImportVerify verify,
const char *url,
CurlGlue *glue,
-ImportJobFinished on_finished,
+PullJobFinished on_finished,
void *userdata) {

-_cleanup_(import_job_unrefp) ImportJob *checksum_job = NULL, *signature_job = NULL;
+_cleanup_(pull_job_unrefp) PullJob *checksum_job = NULL, *signature_job = NULL;
int r;

assert(ret_checksum_job);
@@ -240,7 +240,7 @@ int import_make_verification_jobs(
if (r < 0)
return r;

-r = import_job_new(&checksum_job, checksum_url, glue, userdata);
+r = pull_job_new(&checksum_job, checksum_url, glue, userdata);
if (r < 0)
return r;

@@ -256,7 +256,7 @@ int import_make_verification_jobs(
if (r < 0)
return r;

-r = import_job_new(&signature_job, signature_url, glue, userdata);
+r = pull_job_new(&signature_job, signature_url, glue, userdata);
if (r < 0)
return r;

@@ -272,10 +272,10 @@ int import_make_verification_jobs(
return 0;
}

-int import_verify(
-ImportJob *main_job,
-ImportJob *checksum_job,
-ImportJob *signature_job) {
+int pull_verify(
+PullJob *main_job,
+PullJob *checksum_job,
+PullJob *signature_job) {

_cleanup_close_pair_ int gpg_pipe[2] = { -1, -1 };
_cleanup_free_ char *fn = NULL;
@@ -287,14 +287,14 @@ int import_verify(
int r;

assert(main_job);
-assert(main_job->state == IMPORT_JOB_DONE);
+assert(main_job->state == PULL_JOB_DONE);

if (!checksum_job)
return 0;

assert(main_job->calc_checksum);
assert(main_job->checksum);
-assert(checksum_job->state == IMPORT_JOB_DONE);
+assert(checksum_job->state == PULL_JOB_DONE);

if (!checksum_job->payload || checksum_job->payload_size <= 0) {
log_error("Checksum is empty, cannot verify.");
@@ -327,7 +327,7 @@ int import_verify(
if (!signature_job)
return 0;

-assert(signature_job->state == IMPORT_JOB_DONE);
+assert(signature_job->state == PULL_JOB_DONE);

if (!signature_job->payload || signature_job->payload_size <= 0) {
log_error("Signature is empty, cannot verify.");
@@ -464,7 +464,7 @@ finish:
return r;
}

-int import_fork_tar(const char *path, pid_t *ret) {
+int pull_fork_tar(const char *path, pid_t *ret) {
_cleanup_close_pair_ int pipefd[2] = { -1, -1 };
pid_t pid;
int r;

@@ -23,19 +23,19 @@

#include <stdbool.h>

-#include "import-job.h"
+#include "pull-job.h"
#include "import-util.h"

-int import_make_local_copy(const char *final, const char *root, const char *local, bool force_local);
+int pull_make_local_copy(const char *final, const char *root, const char *local, bool force_local);

-int import_find_old_etags(const char *url, const char *root, int dt, const char *prefix, const char *suffix, char ***etags);
+int pull_find_old_etags(const char *url, const char *root, int dt, const char *prefix, const char *suffix, char ***etags);

-int import_make_read_only_fd(int fd);
-int import_make_read_only(const char *path);
+int pull_make_read_only_fd(int fd);
+int pull_make_read_only(const char *path);

-int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret);
+int pull_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret);

-int import_make_verification_jobs(ImportJob **ret_checksum_job, ImportJob **ret_signature_job, ImportVerify verify, const char *url, CurlGlue *glue, ImportJobFinished on_finished, void *userdata);
-int import_verify(ImportJob *main_job, ImportJob *checksum_job, ImportJob *signature_job);
+int pull_make_verification_jobs(PullJob **ret_checksum_job, PullJob **ret_signature_job, ImportVerify verify, const char *url, CurlGlue *glue, PullJobFinished on_finished, void *userdata);
+int pull_verify(PullJob *main_job, PullJob *checksum_job, PullJob *signature_job);

-int import_fork_tar(const char *path, pid_t *ret);
+int pull_fork_tar(const char *path, pid_t *ret);

@@ -32,9 +32,9 @@
#include "import-util.h"
#include "curl-util.h"
#include "aufs-util.h"
-#include "import-job.h"
-#include "import-common.h"
-#include "import-dkr.h"
+#include "pull-job.h"
+#include "pull-common.h"
+#include "pull-dkr.h"

typedef enum DkrProgress {
DKR_SEARCHING,
@@ -44,18 +44,18 @@ typedef enum DkrProgress {
DKR_COPYING,
} DkrProgress;

-struct DkrImport {
+struct DkrPull {
sd_event *event;
CurlGlue *glue;

char *index_url;
char *image_root;

-ImportJob *images_job;
-ImportJob *tags_job;
-ImportJob *ancestry_job;
-ImportJob *json_job;
-ImportJob *layer_job;
+PullJob *images_job;
+PullJob *tags_job;
+PullJob *ancestry_job;
+PullJob *json_job;
+PullJob *layer_job;

char *name;
char *tag;
@@ -68,7 +68,7 @@ struct DkrImport {
unsigned n_ancestry;
unsigned current_ancestry;

-DkrImportFinished on_finished;
+DkrPullFinished on_finished;
void *userdata;

char *local;
@@ -88,9 +88,9 @@ struct DkrImport {

#define LAYERS_MAX 2048

-static void dkr_import_job_on_finished(ImportJob *j);
+static void dkr_pull_job_on_finished(PullJob *j);

-DkrImport* dkr_import_unref(DkrImport *i) {
+DkrPull* dkr_pull_unref(DkrPull *i) {
if (!i)
return NULL;

@@ -99,11 +99,11 @@ DkrImport* dkr_import_unref(DkrImport *i) {
(void) wait_for_terminate(i->tar_pid, NULL);
}

-import_job_unref(i->images_job);
-import_job_unref(i->tags_job);
-import_job_unref(i->ancestry_job);
-import_job_unref(i->json_job);
-import_job_unref(i->layer_job);
+pull_job_unref(i->images_job);
+pull_job_unref(i->tags_job);
+pull_job_unref(i->ancestry_job);
+pull_job_unref(i->json_job);
+pull_job_unref(i->layer_job);

curl_glue_unref(i->glue);
sd_event_unref(i->event);
@@ -129,15 +129,15 @@ DkrImport* dkr_import_unref(DkrImport *i) {
return NULL;
}

-int dkr_import_new(
-DkrImport **ret,
+int dkr_pull_new(
+DkrPull **ret,
sd_event *event,
const char *index_url,
const char *image_root,
-DkrImportFinished on_finished,
+DkrPullFinished on_finished,
void *userdata) {

-_cleanup_(dkr_import_unrefp) DkrImport *i = NULL;
+_cleanup_(dkr_pull_unrefp) DkrPull *i = NULL;
char *e;
int r;

@@ -147,7 +147,7 @@ int dkr_import_new(
if (!http_url_is_valid(index_url))
return -EINVAL;

-i = new0(DkrImport, 1);
+i = new0(DkrPull, 1);
if (!i)
return -ENOMEM;

@@ -180,7 +180,7 @@ int dkr_import_new(
if (r < 0)
return r;

-i->glue->on_finished = import_job_curl_on_finished;
+i->glue->on_finished = pull_job_curl_on_finished;
i->glue->userdata = i;

*ret = i;
@@ -189,7 +189,7 @@ int dkr_import_new(
return 0;
}

-static void dkr_import_report_progress(DkrImport *i, DkrProgress p) {
+static void dkr_pull_report_progress(DkrPull *i, DkrProgress p) {
unsigned percent;

assert(i);
@@ -375,7 +375,7 @@ static int parse_ancestry(const void *payload, size_t size, char ***ret) {
}
}

-static const char *dkr_import_current_layer(DkrImport *i) {
+static const char *dkr_pull_current_layer(DkrPull *i) {
assert(i);

if (strv_isempty(i->ancestry))
@@ -384,7 +384,7 @@ static const char *dkr_import_current_layer(DkrImport *i) {
return i->ancestry[i->current_ancestry];
}

-static const char *dkr_import_current_base_layer(DkrImport *i) {
+static const char *dkr_pull_current_base_layer(DkrPull *i) {
assert(i);

if (strv_isempty(i->ancestry))
@@ -396,7 +396,7 @@ static const char *dkr_import_current_base_layer(DkrImport *i) {
return i->ancestry[i->current_ancestry-1];
}

-static int dkr_import_add_token(DkrImport *i, ImportJob *j) {
+static int dkr_pull_add_token(DkrPull *i, PullJob *j) {
const char *t;

assert(i);
@@ -414,32 +414,32 @@ static int dkr_import_add_token(DkrImport *i, ImportJob *j) {
return 0;
}

-static bool dkr_import_is_done(DkrImport *i) {
+static bool dkr_pull_is_done(DkrPull *i) {
assert(i);
assert(i->images_job);

-if (i->images_job->state != IMPORT_JOB_DONE)
+if (i->images_job->state != PULL_JOB_DONE)
return false;

-if (!i->tags_job || i->tags_job->state != IMPORT_JOB_DONE)
+if (!i->tags_job || i->tags_job->state != PULL_JOB_DONE)
return false;

-if (!i->ancestry_job || i->ancestry_job->state != IMPORT_JOB_DONE)
+if (!i->ancestry_job || i->ancestry_job->state != PULL_JOB_DONE)
return false;

-if (!i->json_job || i->json_job->state != IMPORT_JOB_DONE)
+if (!i->json_job || i->json_job->state != PULL_JOB_DONE)
return false;

-if (i->layer_job && i->layer_job->state != IMPORT_JOB_DONE)
+if (i->layer_job && i->layer_job->state != PULL_JOB_DONE)
return false;

-if (dkr_import_current_layer(i))
+if (dkr_pull_current_layer(i))
return false;

return true;
}

-static int dkr_import_make_local_copy(DkrImport *i) {
+static int dkr_pull_make_local_copy(DkrPull *i) {
int r;

assert(i);
@@ -453,16 +453,16 @@ static int dkr_import_make_local_copy(DkrImport *i) {
return log_oom();
}

-r = import_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
+r = pull_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
if (r < 0)
return r;

return 0;
}

-static int dkr_import_job_on_open_disk(ImportJob *j) {
+static int dkr_pull_job_on_open_disk(PullJob *j) {
const char *base;
-DkrImport *i;
+DkrPull *i;
int r;

assert(j);
@@ -480,7 +480,7 @@ static int dkr_import_job_on_open_disk(ImportJob *j) {

mkdir_parents_label(i->temp_path, 0700);

-base = dkr_import_current_base_layer(i);
+base = dkr_pull_current_base_layer(i);
if (base) {
const char *base_path;

@@ -491,22 +491,22 @@ static int dkr_import_job_on_open_disk(ImportJob *j) {
if (r < 0)
return log_error_errno(r, "Failed to make btrfs subvolume %s: %m", i->temp_path);

-j->disk_fd = import_fork_tar(i->temp_path, &i->tar_pid);
+j->disk_fd = pull_fork_tar(i->temp_path, &i->tar_pid);
if (j->disk_fd < 0)
return j->disk_fd;

return 0;
}

-static void dkr_import_job_on_progress(ImportJob *j) {
-DkrImport *i;
+static void dkr_pull_job_on_progress(PullJob *j) {
+DkrPull *i;

assert(j);
assert(j->userdata);

i = j->userdata;

-dkr_import_report_progress(
+dkr_pull_report_progress(
i,
j == i->images_job ? DKR_SEARCHING :
j == i->tags_job ? DKR_RESOLVING :
@@ -514,7 +514,7 @@ static void dkr_import_job_on_progress(ImportJob *j) {
DKR_DOWNLOADING);
}

-static int dkr_import_pull_layer(DkrImport *i) {
+static int dkr_pull_pull_layer(DkrPull *i) {
_cleanup_free_ char *path = NULL;
const char *url, *layer = NULL;
int r;
@@ -525,7 +525,7 @@ static int dkr_import_pull_layer(DkrImport *i) {
assert(!i->final_path);

for (;;) {
-layer = dkr_import_current_layer(i);
+layer = dkr_pull_current_layer(i);
if (!layer)
return 0; /* no more layers */

@@ -554,28 +554,28 @@ static int dkr_import_pull_layer(DkrImport *i) {
path = NULL;

url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", layer, "/layer");
-r = import_job_new(&i->layer_job, url, i->glue, i);
+r = pull_job_new(&i->layer_job, url, i->glue, i);
if (r < 0)
return log_error_errno(r, "Failed to allocate layer job: %m");

-r = dkr_import_add_token(i, i->layer_job);
+r = dkr_pull_add_token(i, i->layer_job);
if (r < 0)
return log_oom();

-i->layer_job->on_finished = dkr_import_job_on_finished;
-i->layer_job->on_open_disk = dkr_import_job_on_open_disk;
-i->layer_job->on_progress = dkr_import_job_on_progress;
+i->layer_job->on_finished = dkr_pull_job_on_finished;
+i->layer_job->on_open_disk = dkr_pull_job_on_open_disk;
+i->layer_job->on_progress = dkr_pull_job_on_progress;
i->layer_job->grow_machine_directory = i->grow_machine_directory;

-r = import_job_begin(i->layer_job);
+r = pull_job_begin(i->layer_job);
if (r < 0)
return log_error_errno(r, "Failed to start layer job: %m");

return 0;
}

-static void dkr_import_job_on_finished(ImportJob *j) {
-DkrImport *i;
+static void dkr_pull_job_on_finished(PullJob *j) {
+DkrPull *i;
int r;

assert(j);
@@ -613,25 +613,25 @@ static void dkr_import_job_on_finished(ImportJob *j) {
}

log_info("Index lookup succeeded, directed to registry %s.", i->response_registries[0]);
-dkr_import_report_progress(i, DKR_RESOLVING);
+dkr_pull_report_progress(i, DKR_RESOLVING);

url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/repositories/", i->name, "/tags/", i->tag);
-r = import_job_new(&i->tags_job, url, i->glue, i);
+r = pull_job_new(&i->tags_job, url, i->glue, i);
if (r < 0) {
log_error_errno(r, "Failed to allocate tags job: %m");
goto finish;
}

-r = dkr_import_add_token(i, i->tags_job);
+r = dkr_pull_add_token(i, i->tags_job);
if (r < 0) {
log_oom();
goto finish;
}

-i->tags_job->on_finished = dkr_import_job_on_finished;
-i->tags_job->on_progress = dkr_import_job_on_progress;
+i->tags_job->on_finished = dkr_pull_job_on_finished;
+i->tags_job->on_progress = dkr_pull_job_on_progress;

-r = import_job_begin(i->tags_job);
+r = pull_job_begin(i->tags_job);
if (r < 0) {
log_error_errno(r, "Failed to start tags job: %m");
goto finish;
@@ -655,47 +655,47 @@ static void dkr_import_job_on_finished(ImportJob *j) {
i->id = id;

log_info("Tag lookup succeeded, resolved to layer %s.", i->id);
-dkr_import_report_progress(i, DKR_METADATA);
+dkr_pull_report_progress(i, DKR_METADATA);

url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", i->id, "/ancestry");
-r = import_job_new(&i->ancestry_job, url, i->glue, i);
+r = pull_job_new(&i->ancestry_job, url, i->glue, i);
if (r < 0) {
log_error_errno(r, "Failed to allocate ancestry job: %m");
goto finish;
}

-r = dkr_import_add_token(i, i->ancestry_job);
+r = dkr_pull_add_token(i, i->ancestry_job);
if (r < 0) {
log_oom();
goto finish;
}

-i->ancestry_job->on_finished = dkr_import_job_on_finished;
-i->ancestry_job->on_progress = dkr_import_job_on_progress;
+i->ancestry_job->on_finished = dkr_pull_job_on_finished;
+i->ancestry_job->on_progress = dkr_pull_job_on_progress;

url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", i->id, "/json");
-r = import_job_new(&i->json_job, url, i->glue, i);
+r = pull_job_new(&i->json_job, url, i->glue, i);
if (r < 0) {
log_error_errno(r, "Failed to allocate json job: %m");
goto finish;
}

-r = dkr_import_add_token(i, i->json_job);
+r = dkr_pull_add_token(i, i->json_job);
if (r < 0) {
log_oom();
goto finish;
}

-i->json_job->on_finished = dkr_import_job_on_finished;
-i->json_job->on_progress = dkr_import_job_on_progress;
+i->json_job->on_finished = dkr_pull_job_on_finished;
+i->json_job->on_progress = dkr_pull_job_on_progress;

-r = import_job_begin(i->ancestry_job);
+r = pull_job_begin(i->ancestry_job);
if (r < 0) {
log_error_errno(r, "Failed to start ancestry job: %m");
goto finish;
}

-r = import_job_begin(i->json_job);
+r = pull_job_begin(i->json_job);
if (r < 0) {
log_error_errno(r, "Failed to start json job: %m");
goto finish;
@@ -730,9 +730,9 @@ static void dkr_import_job_on_finished(ImportJob *j) {
i->n_ancestry = n;
i->current_ancestry = 0;

-dkr_import_report_progress(i, DKR_DOWNLOADING);
+dkr_pull_report_progress(i, DKR_DOWNLOADING);

-r = dkr_import_pull_layer(i);
+r = dkr_pull_pull_layer(i);
if (r < 0)
goto finish;

@@ -768,26 +768,26 @@ static void dkr_import_job_on_finished(ImportJob *j) {

log_info("Completed writing to layer %s.", i->final_path);

-i->layer_job = import_job_unref(i->layer_job);
+i->layer_job = pull_job_unref(i->layer_job);
free(i->temp_path);
i->temp_path = NULL;
free(i->final_path);
i->final_path = NULL;

i->current_ancestry ++;
-r = dkr_import_pull_layer(i);
+r = dkr_pull_pull_layer(i);
if (r < 0)
goto finish;

} else if (i->json_job != j)
assert_not_reached("Got finished event for unknown curl object");

-if (!dkr_import_is_done(i))
+if (!dkr_pull_is_done(i))
return;

-dkr_import_report_progress(i, DKR_COPYING);
+dkr_pull_report_progress(i, DKR_COPYING);

-r = dkr_import_make_local_copy(i);
+r = dkr_pull_make_local_copy(i);
if (r < 0)
goto finish;

@@ -800,10 +800,10 @@ finish:
sd_event_exit(i->event, r);
}

-static int dkr_import_job_on_header(ImportJob *j, const char *header, size_t sz) {
+static int dkr_pull_job_on_header(PullJob *j, const char *header, size_t sz) {
_cleanup_free_ char *registry = NULL;
char *token;
-DkrImport *i;
+DkrPull *i;
int r;

assert(j);
@@ -845,7 +845,7 @@ static int dkr_import_job_on_header(ImportJob *j, const char *header, size_t sz)
return 0;
}

-int dkr_import_pull(DkrImport *i, const char *name, const char *tag, const char *local, bool force_local) {
+int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *local, bool force_local) {
const char *url;
int r;

@@ -880,17 +880,17 @@ int dkr_import_pull(DkrImport *i, const char *name, const char *tag, const char

url = strjoina(i->index_url, "/v1/repositories/", name, "/images");

-r = import_job_new(&i->images_job, url, i->glue, i);
+r = pull_job_new(&i->images_job, url, i->glue, i);
if (r < 0)
return r;

-r = dkr_import_add_token(i, i->images_job);
+r = dkr_pull_add_token(i, i->images_job);
if (r < 0)
return r;

-i->images_job->on_finished = dkr_import_job_on_finished;
-i->images_job->on_header = dkr_import_job_on_header;
-i->images_job->on_progress = dkr_import_job_on_progress;
+i->images_job->on_finished = dkr_pull_job_on_finished;
+i->images_job->on_header = dkr_pull_job_on_header;
+i->images_job->on_progress = dkr_pull_job_on_progress;

-return import_job_begin(i->images_job);
+return pull_job_begin(i->images_job);
}

@@ -24,13 +24,13 @@
#include "sd-event.h"
#include "util.h"

-typedef struct DkrImport DkrImport;
+typedef struct DkrPull DkrPull;

-typedef void (*DkrImportFinished)(DkrImport *import, int error, void *userdata);
+typedef void (*DkrPullFinished)(DkrPull *pull, int error, void *userdata);

-int dkr_import_new(DkrImport **import, sd_event *event, const char *index_url, const char *image_root, DkrImportFinished on_finished, void *userdata);
-DkrImport* dkr_import_unref(DkrImport *import);
+int dkr_pull_new(DkrPull **pull, sd_event *event, const char *index_url, const char *image_root, DkrPullFinished on_finished, void *userdata);
+DkrPull* dkr_pull_unref(DkrPull *pull);

-DEFINE_TRIVIAL_CLEANUP_FUNC(DkrImport*, dkr_import_unref);
+DEFINE_TRIVIAL_CLEANUP_FUNC(DkrPull*, dkr_pull_unref);

-int dkr_import_pull(DkrImport *import, const char *name, const char *tag, const char *local, bool force_local);
+int dkr_pull_start(DkrPull *pull, const char *name, const char *tag, const char *local, bool force_local);

@@ -23,12 +23,12 @@

#include "strv.h"
#include "machine-pool.h"
-#include "import-job.h"
+#include "pull-job.h"

/* Grow the /var/lib/machines directory after each 10MiB written */
-#define IMPORT_GROW_INTERVAL_BYTES (UINT64_C(10) * UINT64_C(1024) * UINT64_C(1024))
+#define PULL_GROW_INTERVAL_BYTES (UINT64_C(10) * UINT64_C(1024) * UINT64_C(1024))

-ImportJob* import_job_unref(ImportJob *j) {
+PullJob* pull_job_unref(PullJob *j) {
if (!j)
return NULL;

@@ -37,11 +37,11 @@ ImportJob* import_job_unref(ImportJob *j) {

safe_close(j->disk_fd);

-if (j->compressed == IMPORT_JOB_XZ)
+if (j->compressed == PULL_JOB_XZ)
lzma_end(&j->xz);
-else if (j->compressed == IMPORT_JOB_GZIP)
+else if (j->compressed == PULL_JOB_GZIP)
inflateEnd(&j->gzip);
-else if (j->compressed == IMPORT_JOB_BZIP2)
+else if (j->compressed == PULL_JOB_BZIP2)
BZ2_bzDecompressEnd(&j->bzip2);

if (j->checksum_context)
@@ -58,19 +58,19 @@ ImportJob* import_job_unref(ImportJob *j) {
return NULL;
}

-static void import_job_finish(ImportJob *j, int ret) {
+static void pull_job_finish(PullJob *j, int ret) {
assert(j);

-if (j->state == IMPORT_JOB_DONE ||
-j->state == IMPORT_JOB_FAILED)
+if (j->state == PULL_JOB_DONE ||
+j->state == PULL_JOB_FAILED)
return;

if (ret == 0) {
-j->state = IMPORT_JOB_DONE;
+j->state = PULL_JOB_DONE;
j->progress_percent = 100;
log_info("Download of %s complete.", j->url);
} else {
-j->state = IMPORT_JOB_FAILED;
+j->state = PULL_JOB_FAILED;
j->error = ret;
}

@@ -78,8 +78,8 @@ static void import_job_finish(ImportJob *j, int ret) {
j->on_finished(j);
}

-void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
-ImportJob *j = NULL;
+void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
+PullJob *j = NULL;
CURLcode code;
long status;
int r;
@@ -87,7 +87,7 @@ void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &j) != CURLE_OK)
return;

-if (!j || j->state == IMPORT_JOB_DONE || j->state == IMPORT_JOB_FAILED)
+if (!j || j->state == PULL_JOB_DONE || j->state == PULL_JOB_FAILED)
return;

if (result != CURLE_OK) {
@@ -116,7 +116,7 @@ void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
goto finish;
}

-if (j->state != IMPORT_JOB_RUNNING) {
+if (j->state != PULL_JOB_RUNNING) {
log_error("Premature connection termination.");
r = -EIO;
goto finish;
@@ -177,10 +177,10 @@ void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
r = 0;

finish:
-import_job_finish(j, r);
+pull_job_finish(j, r);
}

-static int import_job_write_uncompressed(ImportJob *j, void *p, size_t sz) {
+static int pull_job_write_uncompressed(PullJob *j, void *p, size_t sz) {
ssize_t n;

assert(j);
@@ -201,7 +201,7 @@ static int import_job_write_uncompressed(ImportJob *j, void *p, size_t sz) {

if (j->disk_fd >= 0) {

-if (j->grow_machine_directory && j->written_since_last_grow >= IMPORT_GROW_INTERVAL_BYTES) {
+if (j->grow_machine_directory && j->written_since_last_grow >= PULL_GROW_INTERVAL_BYTES) {
j->written_since_last_grow = 0;
grow_machine_directory();
}

@@ -233,7 +233,7 @@ static int import_job_write_uncompressed(ImportJob *j, void *p, size_t sz) {
return 0;
}

-static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {
+static int pull_job_write_compressed(PullJob *j, void *p, size_t sz) {
int r;

assert(j);
@@ -263,14 +263,14 @@ static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {

switch (j->compressed) {

-case IMPORT_JOB_UNCOMPRESSED:
-r = import_job_write_uncompressed(j, p, sz);
+case PULL_JOB_UNCOMPRESSED:
+r = pull_job_write_uncompressed(j, p, sz);
if (r < 0)
return r;

break;

-case IMPORT_JOB_XZ:
+case PULL_JOB_XZ:
j->xz.next_in = p;
j->xz.avail_in = sz;

@@ -287,14 +287,14 @@ static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {
return -EIO;
}

-r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->xz.avail_out);
+r = pull_job_write_uncompressed(j, buffer, sizeof(buffer) - j->xz.avail_out);
if (r < 0)
return r;
}

break;

-case IMPORT_JOB_GZIP:
+case PULL_JOB_GZIP:
j->gzip.next_in = p;
j->gzip.avail_in = sz;

@@ -310,14 +310,14 @@ static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {
return -EIO;
}

-r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->gzip.avail_out);
+r = pull_job_write_uncompressed(j, buffer, sizeof(buffer) - j->gzip.avail_out);
if (r < 0)
return r;
}

break;

-case IMPORT_JOB_BZIP2:
+case PULL_JOB_BZIP2:
j->bzip2.next_in = p;
j->bzip2.avail_in = sz;

@@ -333,7 +333,7 @@ static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {
return -EIO;
}

-r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->bzip2.avail_out);
+r = pull_job_write_uncompressed(j, buffer, sizeof(buffer) - j->bzip2.avail_out);
if (r < 0)
return r;
}
@@ -349,7 +349,7 @@ static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {
return 0;
}

-static int import_job_open_disk(ImportJob *j) {
+static int pull_job_open_disk(PullJob *j) {
int r;

assert(j);
@@ -383,7 +383,7 @@ static int import_job_open_disk(ImportJob *j) {
return 0;
}

-static int import_job_detect_compression(ImportJob *j) {
+static int pull_job_detect_compression(PullJob *j) {
static const uint8_t xz_signature[] = {
0xfd, '7', 'z', 'X', 'Z', 0x00
};
@@ -407,19 +407,19 @@ static int import_job_detect_compression(ImportJob *j) {
return 0;

if (memcmp(j->payload, xz_signature, sizeof(xz_signature)) == 0)
-j->compressed = IMPORT_JOB_XZ;
+j->compressed = PULL_JOB_XZ;
else if (memcmp(j->payload, gzip_signature, sizeof(gzip_signature)) == 0)
-j->compressed = IMPORT_JOB_GZIP;
+j->compressed = PULL_JOB_GZIP;
else if (memcmp(j->payload, bzip2_signature, sizeof(bzip2_signature)) == 0)
-j->compressed = IMPORT_JOB_BZIP2;
+j->compressed = PULL_JOB_BZIP2;
else
-j->compressed = IMPORT_JOB_UNCOMPRESSED;
+j->compressed = PULL_JOB_UNCOMPRESSED;

-log_debug("Stream is XZ compressed: %s", yes_no(j->compressed == IMPORT_JOB_XZ));
-log_debug("Stream is GZIP compressed: %s", yes_no(j->compressed == IMPORT_JOB_GZIP));
-log_debug("Stream is BZIP2 compressed: %s", yes_no(j->compressed == IMPORT_JOB_BZIP2));
+log_debug("Stream is XZ compressed: %s", yes_no(j->compressed == PULL_JOB_XZ));
+log_debug("Stream is GZIP compressed: %s", yes_no(j->compressed == PULL_JOB_GZIP));
+log_debug("Stream is BZIP2 compressed: %s", yes_no(j->compressed == PULL_JOB_BZIP2));

-if (j->compressed == IMPORT_JOB_XZ) {
+if (j->compressed == PULL_JOB_XZ) {
lzma_ret xzr;

xzr = lzma_stream_decoder(&j->xz, UINT64_MAX, LZMA_TELL_UNSUPPORTED_CHECK);
@@ -428,14 +428,14 @@ static int import_job_detect_compression(ImportJob *j) {
return -EIO;
}
}
-if (j->compressed == IMPORT_JOB_GZIP) {
+if (j->compressed == PULL_JOB_GZIP) {
r = inflateInit2(&j->gzip, 15+16);
if (r != Z_OK) {
log_error("Failed to initialize gzip decoder.");
return -EIO;
}
}
-if (j->compressed == IMPORT_JOB_BZIP2) {
+if (j->compressed == PULL_JOB_BZIP2) {
r = BZ2_bzDecompressInit(&j->bzip2, 0, 0);
if (r != BZ_OK) {
log_error("Failed to initialize bzip2 decoder.");
@@ -443,7 +443,7 @@ static int import_job_detect_compression(ImportJob *j) {
}
}

-r = import_job_open_disk(j);
+r = pull_job_open_disk(j);
if (r < 0)
return r;

@@ -455,17 +455,17 @@ static int import_job_detect_compression(ImportJob *j) {
j->payload_size = 0;
j->payload_allocated = 0;

-j->state = IMPORT_JOB_RUNNING;
+j->state = PULL_JOB_RUNNING;

-r = import_job_write_compressed(j, stub, stub_size);
+r = pull_job_write_compressed(j, stub, stub_size);
if (r < 0)
return r;

return 0;
}

-static size_t import_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
-ImportJob *j = userdata;
+static size_t pull_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
+PullJob *j = userdata;
size_t sz = size * nmemb;
int r;

@@ -474,7 +474,7 @@ static size_t import_job_write_callback(void *contents, size_t size, size_t nmem

switch (j->state) {

-case IMPORT_JOB_ANALYZING:
+case PULL_JOB_ANALYZING:
/* Let's first check what it actually is */

if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) {
@@ -485,22 +485,22 @@ static size_t import_job_write_callback(void *contents, size_t size, size_t nmem
memcpy(j->payload + j->payload_size, contents, sz);
j->payload_size += sz;

-r = import_job_detect_compression(j);
+r = pull_job_detect_compression(j);
if (r < 0)
goto fail;

break;

-case IMPORT_JOB_RUNNING:
+case PULL_JOB_RUNNING:

-r = import_job_write_compressed(j, contents, sz);
+r = pull_job_write_compressed(j, contents, sz);
if (r < 0)
goto fail;

break;

-case IMPORT_JOB_DONE:
-case IMPORT_JOB_FAILED:
+case PULL_JOB_DONE:
+case PULL_JOB_FAILED:
r = -ESTALE;
goto fail;

@@ -511,12 +511,12 @@ static size_t import_job_write_callback(void *contents, size_t size, size_t nmem
return sz;

fail:
-import_job_finish(j, r);
+pull_job_finish(j, r);
return 0;
}

-static size_t import_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
-ImportJob *j = userdata;
+static size_t pull_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
+PullJob *j = userdata;
size_t sz = size * nmemb;
_cleanup_free_ char *length = NULL, *last_modified = NULL;
char *etag;
@@ -525,12 +525,12 @@ static size_t import_job_header_callback(void *contents, size_t size, size_t nme
assert(contents);
assert(j);

-if (j->state == IMPORT_JOB_DONE || j->state == IMPORT_JOB_FAILED) {
+if (j->state == PULL_JOB_DONE || j->state == PULL_JOB_FAILED) {
r = -ESTALE;
goto fail;
}

-assert(j->state == IMPORT_JOB_ANALYZING);
+assert(j->state == PULL_JOB_ANALYZING);

r = curl_header_strdup(contents, sz, "ETag:", &etag);
if (r < 0) {
@@ -544,7 +544,7 @@ static size_t import_job_header_callback(void *contents, size_t size, size_t nme
if (strv_contains(j->old_etags, j->etag)) {
log_info("Image already downloaded. Skipping download.");
j->etag_exists = true;
-import_job_finish(j, 0);
+pull_job_finish(j, 0);
return sz;
}

@@ -593,12 +593,12 @@ static size_t import_job_header_callback(void *contents, size_t size, size_t nme
return sz;

fail:
-import_job_finish(j, r);
+pull_job_finish(j, r);
return 0;
}

-static int import_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) {
-ImportJob *j = userdata;
+static int pull_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) {
+PullJob *j = userdata;
unsigned percent;
usec_t n;

@@ -640,18 +640,18 @@ static int import_job_progress_callback(void *userdata, curl_off_t dltotal, curl
return 0;
}

-int import_job_new(ImportJob **ret, const char *url, CurlGlue *glue, void *userdata) {
-_cleanup_(import_job_unrefp) ImportJob *j = NULL;
+int pull_job_new(PullJob **ret, const char *url, CurlGlue *glue, void *userdata) {
+_cleanup_(pull_job_unrefp) PullJob *j = NULL;

assert(url);
assert(glue);
assert(ret);

-j = new0(ImportJob, 1);
+j = new0(PullJob, 1);
if (!j)
return -ENOMEM;

-j->state = IMPORT_JOB_INIT;
+j->state = PULL_JOB_INIT;
j->disk_fd = -1;
j->userdata = userdata;
j->glue = glue;
@@ -669,12 +669,12 @@ int import_job_new(ImportJob **ret, const char *url, CurlGlue *glue, void *userd
return 0;
}

-int import_job_begin(ImportJob *j) {
+int pull_job_begin(PullJob *j) {
int r;

assert(j);

-if (j->state != IMPORT_JOB_INIT)
+if (j->state != PULL_JOB_INIT)
return -EBUSY;

if (j->grow_machine_directory)
@@ -715,19 +715,19 @@ int import_job_begin(ImportJob *j) {
return -EIO;
}

-if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, import_job_write_callback) != CURLE_OK)
+if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, pull_job_write_callback) != CURLE_OK)
return -EIO;

if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
return -EIO;

-if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, import_job_header_callback) != CURLE_OK)
+if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, pull_job_header_callback) != CURLE_OK)
return -EIO;

if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
return -EIO;

-if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, import_job_progress_callback) != CURLE_OK)
+if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, pull_job_progress_callback) != CURLE_OK)
return -EIO;

if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK)
@@ -740,7 +740,7 @@ int import_job_begin(ImportJob *j) {
if (r < 0)
return r;

-j->state = IMPORT_JOB_ANALYZING;
+j->state = PULL_JOB_ANALYZING;

return 0;
}

@@ -29,45 +29,45 @@
#include "macro.h"
#include "curl-util.h"

-typedef struct ImportJob ImportJob;
+typedef struct PullJob PullJob;

-typedef void (*ImportJobFinished)(ImportJob *job);
-typedef int (*ImportJobOpenDisk)(ImportJob *job);
-typedef int (*ImportJobHeader)(ImportJob *job, const char *header, size_t sz);
-typedef void (*ImportJobProgress)(ImportJob *job);
+typedef void (*PullJobFinished)(PullJob *job);
+typedef int (*PullJobOpenDisk)(PullJob *job);
+typedef int (*PullJobHeader)(PullJob *job, const char *header, size_t sz);
+typedef void (*PullJobProgress)(PullJob *job);

-typedef enum ImportJobState {
-IMPORT_JOB_INIT,
-IMPORT_JOB_ANALYZING, /* Still reading into ->payload, to figure out what we have */
-IMPORT_JOB_RUNNING, /* Writing to destination */
-IMPORT_JOB_DONE,
-IMPORT_JOB_FAILED,
-_IMPORT_JOB_STATE_MAX,
-_IMPORT_JOB_STATE_INVALID = -1,
-} ImportJobState;
+typedef enum PullJobState {
+PULL_JOB_INIT,
+PULL_JOB_ANALYZING, /* Still reading into ->payload, to figure out what we have */
+PULL_JOB_RUNNING, /* Writing to destination */
+PULL_JOB_DONE,
+PULL_JOB_FAILED,
+_PULL_JOB_STATE_MAX,
+_PULL_JOB_STATE_INVALID = -1,
+} PullJobState;

-#define IMPORT_JOB_STATE_IS_COMPLETE(j) (IN_SET((j)->state, IMPORT_JOB_DONE, IMPORT_JOB_FAILED))
+#define PULL_JOB_STATE_IS_COMPLETE(j) (IN_SET((j)->state, PULL_JOB_DONE, PULL_JOB_FAILED))

-typedef enum ImportJobCompression {
-IMPORT_JOB_UNCOMPRESSED,
-IMPORT_JOB_XZ,
-IMPORT_JOB_GZIP,
-IMPORT_JOB_BZIP2,
-_IMPORT_JOB_COMPRESSION_MAX,
-_IMPORT_JOB_COMPRESSION_INVALID = -1,
-} ImportJobCompression;
+typedef enum PullJobCompression {
+PULL_JOB_UNCOMPRESSED,
+PULL_JOB_XZ,
+PULL_JOB_GZIP,
+PULL_JOB_BZIP2,
+_PULL_JOB_COMPRESSION_MAX,
+_PULL_JOB_COMPRESSION_INVALID = -1,
+} PullJobCompression;

-struct ImportJob {
-ImportJobState state;
+struct PullJob {
+PullJobState state;
int error;

char *url;

void *userdata;
-ImportJobFinished on_finished;
-ImportJobOpenDisk on_open_disk;
-ImportJobHeader on_header;
-ImportJobProgress on_progress;
+PullJobFinished on_finished;
+PullJobOpenDisk on_open_disk;
+PullJobHeader on_header;
+PullJobProgress on_progress;

CurlGlue *glue;
CURL *curl;
@@ -92,7 +92,7 @@ struct ImportJob {

usec_t mtime;

-ImportJobCompression compressed;
+PullJobCompression compressed;
lzma_stream xz;
z_stream gzip;
bz_stream bzip2;
@@ -112,11 +112,11 @@ struct ImportJob {
uint64_t written_since_last_grow;
};

-int import_job_new(ImportJob **job, const char *url, CurlGlue *glue, void *userdata);
-ImportJob* import_job_unref(ImportJob *job);
+int pull_job_new(PullJob **job, const char *url, CurlGlue *glue, void *userdata);
+PullJob* pull_job_unref(PullJob *job);

-int import_job_begin(ImportJob *j);
+int pull_job_begin(PullJob *j);

-void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result);
+void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result);

-DEFINE_TRIVIAL_CLEANUP_FUNC(ImportJob*, import_job_unref);
+DEFINE_TRIVIAL_CLEANUP_FUNC(PullJob*, pull_job_unref);

@@ -35,9 +35,9 @@
#include "import-util.h"
#include "curl-util.h"
#include "qcow2-util.h"
-#include "import-job.h"
-#include "import-common.h"
-#include "import-raw.h"
+#include "pull-job.h"
+#include "pull-common.h"
+#include "pull-raw.h"

typedef enum RawProgress {
RAW_DOWNLOADING,
@@ -47,17 +47,17 @@ typedef enum RawProgress {
RAW_COPYING,
} RawProgress;

-struct RawImport {
+struct RawPull {
sd_event *event;
CurlGlue *glue;

char *image_root;

-ImportJob *raw_job;
-ImportJob *checksum_job;
-ImportJob *signature_job;
+PullJob *raw_job;
+PullJob *checksum_job;
+PullJob *signature_job;

-RawImportFinished on_finished;
+RawPullFinished on_finished;
void *userdata;

char *local;
@@ -70,13 +70,13 @@ struct RawImport {
ImportVerify verify;
};

-RawImport* raw_import_unref(RawImport *i) {
+RawPull* raw_pull_unref(RawPull *i) {
if (!i)
return NULL;

-import_job_unref(i->raw_job);
-import_job_unref(i->checksum_job);
-import_job_unref(i->signature_job);
+pull_job_unref(i->raw_job);
+pull_job_unref(i->checksum_job);
+pull_job_unref(i->signature_job);

curl_glue_unref(i->glue);
sd_event_unref(i->event);
@@ -94,19 +94,19 @@ RawImport* raw_import_unref(RawImport *i) {
return NULL;
}

-int raw_import_new(
-RawImport **ret,
+int raw_pull_new(
+RawPull **ret,
sd_event *event,
const char *image_root,
-RawImportFinished on_finished,
+RawPullFinished on_finished,
void *userdata) {

-_cleanup_(raw_import_unrefp) RawImport *i = NULL;
+_cleanup_(raw_pull_unrefp) RawPull *i = NULL;
int r;

assert(ret);

-i = new0(RawImport, 1);
+i = new0(RawPull, 1);
if (!i)
return -ENOMEM;

@@ -131,7 +131,7 @@ int raw_import_new(
if (r < 0)
return r;

-i->glue->on_finished = import_job_curl_on_finished;
+i->glue->on_finished = pull_job_curl_on_finished;
i->glue->userdata = i;

*ret = i;
@@ -140,7 +140,7 @@ int raw_import_new(
return 0;
}

-static void raw_import_report_progress(RawImport *i, RawProgress p) {
+static void raw_pull_report_progress(RawPull *i, RawProgress p) {
unsigned percent;

assert(i);
@@ -191,7 +191,7 @@ static void raw_import_report_progress(RawImport *i, RawProgress p) {
log_debug("Combined progress %u%%", percent);
}

-static int raw_import_maybe_convert_qcow2(RawImport *i) {
+static int raw_pull_maybe_convert_qcow2(RawPull *i) {
_cleanup_close_ int converted_fd = -1;
_cleanup_free_ char *t = NULL;
int r;
@@ -239,7 +239,7 @@ static int raw_import_maybe_convert_qcow2(RawImport *i) {
return 1;
}

-static int raw_import_make_local_copy(RawImport *i) {
+static int raw_pull_make_local_copy(RawPull *i) {
_cleanup_free_ char *tp = NULL;
_cleanup_close_ int dfd = -1;
const char *p;
@@ -257,7 +257,7 @@ static int raw_import_make_local_copy(RawImport *i) {
assert(i->raw_job->disk_fd < 0);

if (!i->final_path) {
-r = import_make_path(i->raw_job->url, i->raw_job->etag, i->image_root, ".raw-", ".raw", &i->final_path);
+r = pull_make_path(i->raw_job->url, i->raw_job->etag, i->image_root, ".raw-", ".raw", &i->final_path);
if (r < 0)
return log_oom();
}
@@ -318,22 +318,22 @@ static int raw_import_make_local_copy(RawImport *i) {
return 0;
}

-static bool raw_import_is_done(RawImport *i) {
+static bool raw_pull_is_done(RawPull *i) {
assert(i);
assert(i->raw_job);

-if (i->raw_job->state != IMPORT_JOB_DONE)
+if (i->raw_job->state != PULL_JOB_DONE)
return false;
-if (i->checksum_job && i->checksum_job->state != IMPORT_JOB_DONE)
+if (i->checksum_job && i->checksum_job->state != PULL_JOB_DONE)
return false;
-if (i->signature_job && i->signature_job->state != IMPORT_JOB_DONE)
+if (i->signature_job && i->signature_job->state != PULL_JOB_DONE)
return false;

return true;
}

-static void raw_import_job_on_finished(ImportJob *j) {
-RawImport *i;
+static void raw_pull_job_on_finished(PullJob *j) {
+RawPull *i;
int r;

assert(j);
@@ -359,28 +359,28 @@ static void raw_import_job_on_finished(ImportJob *j) {
*
* We only do something when we got all three files */

-if (!raw_import_is_done(i))
+if (!raw_pull_is_done(i))
return;

if (!i->raw_job->etag_exists) {
/* This is a new download, verify it, and move it into place */
assert(i->raw_job->disk_fd >= 0);

-raw_import_report_progress(i, RAW_VERIFYING);
+raw_pull_report_progress(i, RAW_VERIFYING);

-r = import_verify(i->raw_job, i->checksum_job, i->signature_job);
+r = pull_verify(i->raw_job, i->checksum_job, i->signature_job);
if (r < 0)
goto finish;

-raw_import_report_progress(i, RAW_UNPACKING);
+raw_pull_report_progress(i, RAW_UNPACKING);

-r = raw_import_maybe_convert_qcow2(i);
+r = raw_pull_maybe_convert_qcow2(i);
if (r < 0)
goto finish;

-raw_import_report_progress(i, RAW_FINALIZING);
+raw_pull_report_progress(i, RAW_FINALIZING);

-r = import_make_read_only_fd(i->raw_job->disk_fd);
+r = pull_make_read_only_fd(i->raw_job->disk_fd);
if (r < 0)
goto finish;

@@ -394,9 +394,9 @@ static void raw_import_job_on_finished(ImportJob *j) {
i->temp_path = NULL;
}

-raw_import_report_progress(i, RAW_COPYING);
+raw_pull_report_progress(i, RAW_COPYING);

-r = raw_import_make_local_copy(i);
+r = raw_pull_make_local_copy(i);
if (r < 0)
goto finish;

@@ -409,8 +409,8 @@ finish:
sd_event_exit(i->event, r);
}

-static int raw_import_job_on_open_disk(ImportJob *j) {
-RawImport *i;
+static int raw_pull_job_on_open_disk(PullJob *j) {
+RawPull *i;
int r;

assert(j);
@@ -421,7 +421,7 @@ static int raw_import_job_on_open_disk(ImportJob *j) {
assert(!i->final_path);
assert(!i->temp_path);

-r = import_make_path(j->url, j->etag, i->image_root, ".raw-", ".raw", &i->final_path);
+r = pull_make_path(j->url, j->etag, i->image_root, ".raw-", ".raw", &i->final_path);
if (r < 0)
return log_oom();

@@ -442,18 +442,18 @@ static int raw_import_job_on_open_disk(ImportJob *j) {
return 0;
}

-static void raw_import_job_on_progress(ImportJob *j) {
-RawImport *i;
+static void raw_pull_job_on_progress(PullJob *j) {
+RawPull *i;

assert(j);
assert(j->userdata);

i = j->userdata;

-raw_import_report_progress(i, RAW_DOWNLOADING);
+raw_pull_report_progress(i, RAW_DOWNLOADING);
}

-int raw_import_pull(RawImport *i, const char *url, const char *local, bool force_local, ImportVerify verify) {
+int raw_pull_start(RawPull *i, const char *url, const char *local, bool force_local, ImportVerify verify) {
int r;

assert(i);
@@ -476,40 +476,40 @@ int raw_import_pull(RawImport *i, const char *url, const char *local, bool force
i->verify = verify;

/* Queue job for the image itself */
-r = import_job_new(&i->raw_job, url, i->glue, i);
+r = pull_job_new(&i->raw_job, url, i->glue, i);
if (r < 0)
return r;

-i->raw_job->on_finished = raw_import_job_on_finished;
-i->raw_job->on_open_disk = raw_import_job_on_open_disk;
-i->raw_job->on_progress = raw_import_job_on_progress;
+i->raw_job->on_finished = raw_pull_job_on_finished;
+i->raw_job->on_open_disk = raw_pull_job_on_open_disk;
+i->raw_job->on_progress = raw_pull_job_on_progress;
i->raw_job->calc_checksum = verify != IMPORT_VERIFY_NO;
i->raw_job->grow_machine_directory = i->grow_machine_directory;

-r = import_find_old_etags(url, i->image_root, DT_REG, ".raw-", ".raw", &i->raw_job->old_etags);
+r = pull_find_old_etags(url, i->image_root, DT_REG, ".raw-", ".raw", &i->raw_job->old_etags);
if (r < 0)
return r;

-r = import_make_verification_jobs(&i->checksum_job, &i->signature_job, verify, url, i->glue, raw_import_job_on_finished, i);
+r = pull_make_verification_jobs(&i->checksum_job, &i->signature_job, verify, url, i->glue, raw_pull_job_on_finished, i);
if (r < 0)
return r;

-r = import_job_begin(i->raw_job);
+r = pull_job_begin(i->raw_job);
if (r < 0)
return r;

if (i->checksum_job) {
-i->checksum_job->on_progress = raw_import_job_on_progress;
+i->checksum_job->on_progress = raw_pull_job_on_progress;

-r = import_job_begin(i->checksum_job);
+r = pull_job_begin(i->checksum_job);
if (r < 0)
return r;
}

if (i->signature_job) {
-i->signature_job->on_progress = raw_import_job_on_progress;
+i->signature_job->on_progress = raw_pull_job_on_progress;

-r = import_job_begin(i->signature_job);
+r = pull_job_begin(i->signature_job);
if (r < 0)
return r;
}

@@ -25,13 +25,13 @@
#include "macro.h"
#include "import-util.h"

-typedef struct RawImport RawImport;
+typedef struct RawPull RawPull;

-typedef void (*RawImportFinished)(RawImport *import, int error, void *userdata);
+typedef void (*RawPullFinished)(RawPull *pull, int error, void *userdata);

-int raw_import_new(RawImport **import, sd_event *event, const char *image_root, RawImportFinished on_finished, void *userdata);
-RawImport* raw_import_unref(RawImport *import);
+int raw_pull_new(RawPull **pull, sd_event *event, const char *image_root, RawPullFinished on_finished, void *userdata);
+RawPull* raw_pull_unref(RawPull *pull);

-DEFINE_TRIVIAL_CLEANUP_FUNC(RawImport*, raw_import_unref);
+DEFINE_TRIVIAL_CLEANUP_FUNC(RawPull*, raw_pull_unref);

-int raw_import_pull(RawImport *import, const char *url, const char *local, bool force_local, ImportVerify verify);
+int raw_pull_start(RawPull *pull, const char *url, const char *local, bool force_local, ImportVerify verify);

@ -33,9 +33,9 @@
#include "path-util.h"
#include "import-util.h"
#include "curl-util.h"
#include "import-job.h"
#include "import-common.h"
#include "import-tar.h"
#include "pull-job.h"
#include "pull-common.h"
#include "pull-tar.h"

typedef enum TarProgress {
TAR_DOWNLOADING,
@ -44,17 +44,17 @@ typedef enum TarProgress {
TAR_COPYING,
} TarProgress;

struct TarImport {
struct TarPull {
sd_event *event;
CurlGlue *glue;

char *image_root;

ImportJob *tar_job;
ImportJob *checksum_job;
ImportJob *signature_job;
PullJob *tar_job;
PullJob *checksum_job;
PullJob *signature_job;

TarImportFinished on_finished;
TarPullFinished on_finished;
void *userdata;

char *local;
@ -69,7 +69,7 @@ struct TarImport {
ImportVerify verify;
};

TarImport* tar_import_unref(TarImport *i) {
TarPull* tar_pull_unref(TarPull *i) {
if (!i)
return NULL;

@ -78,9 +78,9 @@ TarImport* tar_import_unref(TarImport *i) {
(void) wait_for_terminate(i->tar_pid, NULL);
}

import_job_unref(i->tar_job);
import_job_unref(i->checksum_job);
import_job_unref(i->signature_job);
pull_job_unref(i->tar_job);
pull_job_unref(i->checksum_job);
pull_job_unref(i->signature_job);

curl_glue_unref(i->glue);
sd_event_unref(i->event);
@ -99,20 +99,20 @@ TarImport* tar_import_unref(TarImport *i) {
return NULL;
}

int tar_import_new(
TarImport **ret,
int tar_pull_new(
TarPull **ret,
sd_event *event,
const char *image_root,
TarImportFinished on_finished,
TarPullFinished on_finished,
void *userdata) {

_cleanup_(tar_import_unrefp) TarImport *i = NULL;
_cleanup_(tar_pull_unrefp) TarPull *i = NULL;
int r;

assert(ret);
assert(event);

i = new0(TarImport, 1);
i = new0(TarPull, 1);
if (!i)
return -ENOMEM;

@ -137,7 +137,7 @@ int tar_import_new(
if (r < 0)
return r;

i->glue->on_finished = import_job_curl_on_finished;
i->glue->on_finished = pull_job_curl_on_finished;
i->glue->userdata = i;

*ret = i;
@ -146,7 +146,7 @@ int tar_import_new(
return 0;
}

static void tar_import_report_progress(TarImport *i, TarProgress p) {
static void tar_pull_report_progress(TarPull *i, TarProgress p) {
unsigned percent;

assert(i);
@ -193,7 +193,7 @@ static void tar_import_report_progress(TarImport *i, TarProgress p) {
log_debug("Combined progress %u%%", percent);
}

static int tar_import_make_local_copy(TarImport *i) {
static int tar_pull_make_local_copy(TarPull *i) {
int r;

assert(i);
@ -203,34 +203,34 @@ static int tar_import_make_local_copy(TarImport *i) {
return 0;

if (!i->final_path) {
r = import_make_path(i->tar_job->url, i->tar_job->etag, i->image_root, ".tar-", NULL, &i->final_path);
r = pull_make_path(i->tar_job->url, i->tar_job->etag, i->image_root, ".tar-", NULL, &i->final_path);
if (r < 0)
return log_oom();
}

r = import_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
r = pull_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
if (r < 0)
return r;

return 0;
}

static bool tar_import_is_done(TarImport *i) {
static bool tar_pull_is_done(TarPull *i) {
assert(i);
assert(i->tar_job);

if (i->tar_job->state != IMPORT_JOB_DONE)
if (i->tar_job->state != PULL_JOB_DONE)
return false;
if (i->checksum_job && i->checksum_job->state != IMPORT_JOB_DONE)
if (i->checksum_job && i->checksum_job->state != PULL_JOB_DONE)
return false;
if (i->signature_job && i->signature_job->state != IMPORT_JOB_DONE)
if (i->signature_job && i->signature_job->state != PULL_JOB_DONE)
return false;

return true;
}
static void tar_import_job_on_finished(ImportJob *j) {
TarImport *i;
static void tar_pull_job_on_finished(PullJob *j) {
TarPull *i;
int r;

assert(j);
@ -253,7 +253,7 @@ static void tar_import_job_on_finished(ImportJob *j) {
* successfully, or the download was skipped because we
* already have the etag. */

if (!tar_import_is_done(i))
if (!tar_pull_is_done(i))
return;

j->disk_fd = safe_close(i->tar_job->disk_fd);
@ -268,15 +268,15 @@ static void tar_import_job_on_finished(ImportJob *j) {
if (!i->tar_job->etag_exists) {
/* This is a new download, verify it, and move it into place */

tar_import_report_progress(i, TAR_VERIFYING);
tar_pull_report_progress(i, TAR_VERIFYING);

r = import_verify(i->tar_job, i->checksum_job, i->signature_job);
r = pull_verify(i->tar_job, i->checksum_job, i->signature_job);
if (r < 0)
goto finish;

tar_import_report_progress(i, TAR_FINALIZING);
tar_pull_report_progress(i, TAR_FINALIZING);

r = import_make_read_only(i->temp_path);
r = pull_make_read_only(i->temp_path);
if (r < 0)
goto finish;

@ -289,9 +289,9 @@ static void tar_import_job_on_finished(ImportJob *j) {
i->temp_path = NULL;
}

tar_import_report_progress(i, TAR_COPYING);
tar_pull_report_progress(i, TAR_COPYING);

r = tar_import_make_local_copy(i);
r = tar_pull_make_local_copy(i);
if (r < 0)
goto finish;

@ -304,8 +304,8 @@ finish:
sd_event_exit(i->event, r);
}

static int tar_import_job_on_open_disk(ImportJob *j) {
TarImport *i;
static int tar_pull_job_on_open_disk(PullJob *j) {
TarPull *i;
int r;

assert(j);
@ -317,7 +317,7 @@ static int tar_import_job_on_open_disk(ImportJob *j) {
assert(!i->temp_path);
assert(i->tar_pid <= 0);

r = import_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path);
r = pull_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path);
if (r < 0)
return log_oom();

@ -334,25 +334,25 @@ static int tar_import_job_on_open_disk(ImportJob *j) {
} else if (r < 0)
return log_error_errno(errno, "Failed to create subvolume %s: %m", i->temp_path);

j->disk_fd = import_fork_tar(i->temp_path, &i->tar_pid);
j->disk_fd = pull_fork_tar(i->temp_path, &i->tar_pid);
if (j->disk_fd < 0)
return j->disk_fd;

return 0;
}

static void tar_import_job_on_progress(ImportJob *j) {
TarImport *i;
static void tar_pull_job_on_progress(PullJob *j) {
TarPull *i;

assert(j);
assert(j->userdata);

i = j->userdata;

tar_import_report_progress(i, TAR_DOWNLOADING);
tar_pull_report_progress(i, TAR_DOWNLOADING);
}
int tar_import_pull(TarImport *i, const char *url, const char *local, bool force_local, ImportVerify verify) {
int tar_pull_start(TarPull *i, const char *url, const char *local, bool force_local, ImportVerify verify) {
int r;

assert(i);
@ -372,40 +372,40 @@ int tar_import_pull(TarImport *i, const char *url, const char *local, bool force
i->force_local = force_local;
i->verify = verify;

r = import_job_new(&i->tar_job, url, i->glue, i);
r = pull_job_new(&i->tar_job, url, i->glue, i);
if (r < 0)
return r;

i->tar_job->on_finished = tar_import_job_on_finished;
i->tar_job->on_open_disk = tar_import_job_on_open_disk;
i->tar_job->on_progress = tar_import_job_on_progress;
i->tar_job->on_finished = tar_pull_job_on_finished;
i->tar_job->on_open_disk = tar_pull_job_on_open_disk;
i->tar_job->on_progress = tar_pull_job_on_progress;
i->tar_job->calc_checksum = verify != IMPORT_VERIFY_NO;
i->tar_job->grow_machine_directory = i->grow_machine_directory;

r = import_find_old_etags(url, i->image_root, DT_DIR, ".tar-", NULL, &i->tar_job->old_etags);
r = pull_find_old_etags(url, i->image_root, DT_DIR, ".tar-", NULL, &i->tar_job->old_etags);
if (r < 0)
return r;

r = import_make_verification_jobs(&i->checksum_job, &i->signature_job, verify, url, i->glue, tar_import_job_on_finished, i);
r = pull_make_verification_jobs(&i->checksum_job, &i->signature_job, verify, url, i->glue, tar_pull_job_on_finished, i);
if (r < 0)
return r;

r = import_job_begin(i->tar_job);
r = pull_job_begin(i->tar_job);
if (r < 0)
return r;

if (i->checksum_job) {
i->checksum_job->on_progress = tar_import_job_on_progress;
i->checksum_job->on_progress = tar_pull_job_on_progress;

r = import_job_begin(i->checksum_job);
r = pull_job_begin(i->checksum_job);
if (r < 0)
return r;
}

if (i->signature_job) {
i->signature_job->on_progress = tar_import_job_on_progress;
i->signature_job->on_progress = tar_pull_job_on_progress;

r = import_job_begin(i->signature_job);
r = pull_job_begin(i->signature_job);
if (r < 0)
return r;
}
@ -25,13 +25,13 @@
#include "macro.h"
#include "import-util.h"

typedef struct TarImport TarImport;
typedef struct TarPull TarPull;

typedef void (*TarImportFinished)(TarImport *import, int error, void *userdata);
typedef void (*TarPullFinished)(TarPull *pull, int error, void *userdata);

int tar_import_new(TarImport **import, sd_event *event, const char *image_root, TarImportFinished on_finished, void *userdata);
TarImport* tar_import_unref(TarImport *import);
int tar_pull_new(TarPull **pull, sd_event *event, const char *image_root, TarPullFinished on_finished, void *userdata);
TarPull* tar_pull_unref(TarPull *pull);

DEFINE_TRIVIAL_CLEANUP_FUNC(TarImport*, tar_import_unref);
DEFINE_TRIVIAL_CLEANUP_FUNC(TarPull*, tar_pull_unref);

int tar_import_pull(TarImport *import, const char *url, const char *local, bool force_local, ImportVerify verify);
int tar_pull_start(TarPull *pull, const char *url, const char *local, bool force_local, ImportVerify verify);
@ -26,10 +26,10 @@
#include "verbs.h"
#include "build.h"
#include "machine-image.h"
#include "import-tar.h"
#include "import-raw.h"
#include "import-dkr.h"
#include "import-util.h"
#include "pull-tar.h"
#include "pull-raw.h"
#include "pull-dkr.h"

static bool arg_force = false;
static const char *arg_image_root = "/var/lib/machines";
@ -42,9 +42,9 @@ static int interrupt_signal_handler(sd_event_source *s, const struct signalfd_si
return 0;
}

static void on_tar_finished(TarImport *import, int error, void *userdata) {
static void on_tar_finished(TarPull *pull, int error, void *userdata) {
sd_event *event = userdata;
assert(import);
assert(pull);

if (error == 0)
log_info("Operation completed successfully.");
@ -53,7 +53,7 @@ static void on_tar_finished(TarImport *import, int error, void *userdata) {
}

static int pull_tar(int argc, char *argv[], void *userdata) {
_cleanup_(tar_import_unrefp) TarImport *import = NULL;
_cleanup_(tar_pull_unrefp) TarPull *pull = NULL;
_cleanup_event_unref_ sd_event *event = NULL;
const char *url, *local;
_cleanup_free_ char *l = NULL, *ll = NULL;
@ -112,11 +112,11 @@ static int pull_tar(int argc, char *argv[], void *userdata) {
sd_event_add_signal(event, NULL, SIGTERM, interrupt_signal_handler, NULL);
sd_event_add_signal(event, NULL, SIGINT, interrupt_signal_handler, NULL);

r = tar_import_new(&import, event, arg_image_root, on_tar_finished, event);
r = tar_pull_new(&pull, event, arg_image_root, on_tar_finished, event);
if (r < 0)
return log_error_errno(r, "Failed to allocate importer: %m");
return log_error_errno(r, "Failed to allocate puller: %m");

r = tar_import_pull(import, url, local, arg_force, arg_verify);
r = tar_pull_start(pull, url, local, arg_force, arg_verify);
if (r < 0)
return log_error_errno(r, "Failed to pull image: %m");

@ -128,9 +128,9 @@ static int pull_tar(int argc, char *argv[], void *userdata) {
return -r;
}

static void on_raw_finished(RawImport *import, int error, void *userdata) {
static void on_raw_finished(RawPull *pull, int error, void *userdata) {
sd_event *event = userdata;
assert(import);
assert(pull);

if (error == 0)
log_info("Operation completed successfully.");
@ -139,7 +139,7 @@ static void on_raw_finished(RawImport *import, int error, void *userdata) {
}

static int pull_raw(int argc, char *argv[], void *userdata) {
_cleanup_(raw_import_unrefp) RawImport *import = NULL;
_cleanup_(raw_pull_unrefp) RawPull *pull = NULL;
_cleanup_event_unref_ sd_event *event = NULL;
const char *url, *local;
_cleanup_free_ char *l = NULL, *ll = NULL;
@ -198,11 +198,11 @@ static int pull_raw(int argc, char *argv[], void *userdata) {
sd_event_add_signal(event, NULL, SIGTERM, interrupt_signal_handler, NULL);
sd_event_add_signal(event, NULL, SIGINT, interrupt_signal_handler, NULL);

r = raw_import_new(&import, event, arg_image_root, on_raw_finished, event);
r = raw_pull_new(&pull, event, arg_image_root, on_raw_finished, event);
if (r < 0)
return log_error_errno(r, "Failed to allocate importer: %m");
return log_error_errno(r, "Failed to allocate puller: %m");

r = raw_import_pull(import, url, local, arg_force, arg_verify);
r = raw_pull_start(pull, url, local, arg_force, arg_verify);
if (r < 0)
return log_error_errno(r, "Failed to pull image: %m");

@ -214,9 +214,9 @@ static int pull_raw(int argc, char *argv[], void *userdata) {
return -r;
}

static void on_dkr_finished(DkrImport *import, int error, void *userdata) {
static void on_dkr_finished(DkrPull *pull, int error, void *userdata) {
sd_event *event = userdata;
assert(import);
assert(pull);

if (error == 0)
log_info("Operation completed successfully.");
@ -225,7 +225,7 @@ static void on_dkr_finished(DkrImport *import, int error, void *userdata) {
}

static int pull_dkr(int argc, char *argv[], void *userdata) {
_cleanup_(dkr_import_unrefp) DkrImport *import = NULL;
_cleanup_(dkr_pull_unrefp) DkrPull *pull = NULL;
_cleanup_event_unref_ sd_event *event = NULL;
const char *name, *tag, *local;
int r;
@ -236,7 +236,7 @@ static int pull_dkr(int argc, char *argv[], void *userdata) {
}

if (arg_verify != IMPORT_VERIFY_NO) {
log_error("Imports from dkr do not support image verification, please pass --verify=no.");
log_error("Pulls from dkr do not support image verification, please pass --verify=no.");
return -EINVAL;
}

@ -300,11 +300,11 @@ static int pull_dkr(int argc, char *argv[], void *userdata) {
sd_event_add_signal(event, NULL, SIGTERM, interrupt_signal_handler, NULL);
sd_event_add_signal(event, NULL, SIGINT, interrupt_signal_handler, NULL);

r = dkr_import_new(&import, event, arg_dkr_index_url, arg_image_root, on_dkr_finished, event);
r = dkr_pull_new(&pull, event, arg_dkr_index_url, arg_image_root, on_dkr_finished, event);
if (r < 0)
return log_error_errno(r, "Failed to allocate importer: %m");
return log_error_errno(r, "Failed to allocate puller: %m");

r = dkr_import_pull(import, name, tag, local, arg_force);
r = dkr_pull_start(pull, name, tag, local, arg_force);
if (r < 0)
return log_error_errno(r, "Failed to pull image: %m");

@ -319,7 +319,7 @@ static int pull_dkr(int argc, char *argv[], void *userdata) {
static int help(int argc, char *argv[], void *userdata) {

printf("%s [OPTIONS...] {COMMAND} ...\n\n"
"Import container or virtual machine image.\n\n"
"Download container or virtual machine image.\n\n"
" -h --help Show this help\n"
" --version Show package version\n"
" --force Force creation of image\n"
@ -409,7 +409,7 @@ static int parse_argv(int argc, char *argv[]) {
return 1;
}

static int import_main(int argc, char *argv[]) {
static int pull_main(int argc, char *argv[]) {

static const Verb verbs[] = {
{ "help", VERB_ANY, VERB_ANY, 0, help },
@ -433,7 +433,7 @@ int main(int argc, char *argv[]) {
if (r <= 0)
goto finish;

r = import_main(argc, argv);
r = pull_main(argc, argv);

finish:
return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
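Purely as illustration, and not part of the commit, here is a minimal sketch of how a caller drives the renamed tar pull API, modelled on the pull_tar() changes above. The URL, the local name "example", and the wrapper function pull_example() are hypothetical placeholders; the real pull.c additionally installs SIGINT/SIGTERM handlers and parses command-line arguments.

/* Sketch only: drive a TarPull to completion with an sd-event loop,
 * following the pattern of pull_tar() in src/import/pull.c. */
#include "sd-event.h"
#include "log.h"
#include "util.h"
#include "import-util.h"
#include "pull-tar.h"

static void on_finished(TarPull *pull, int error, void *userdata) {
        sd_event *event = userdata;

        if (error == 0)
                log_info("Pull completed successfully.");

        /* Stop the event loop, propagating the pull result. */
        sd_event_exit(event, abs(error));
}

int pull_example(void) {
        _cleanup_(tar_pull_unrefp) TarPull *pull = NULL;
        _cleanup_event_unref_ sd_event *event = NULL;
        int r;

        r = sd_event_default(&event);
        if (r < 0)
                return r;

        r = tar_pull_new(&pull, event, "/var/lib/machines", on_finished, event);
        if (r < 0)
                return log_error_errno(r, "Failed to allocate puller: %m");

        /* Hypothetical URL and local name; verification disabled only to keep
         * the sketch short. */
        r = tar_pull_start(pull, "https://example.com/rootfs.tar.xz", "example", false, IMPORT_VERIFY_NO);
        if (r < 0)
                return log_error_errno(r, "Failed to pull image: %m");

        return sd_event_loop(event);
}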