config: add gc and dedupe as configurable params (default to enabled)
Since we want to conform to dist-spec, and the gc and dedupe optimizations sometimes conflict with the conformance tests being run, allow them to be turned off via configuration params.
parent b1f882e1b8
commit dd1fc1e866
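For reference, a minimal sketch of how the new knobs are intended to flow from the config into the image store. The module import paths, the root-directory value, and the package name "config" are placeholders for illustration only; the types and signatures (Config, StorageConfig, NewImageStore, log.Logger) come from the diff below.

package main

import (
	"os"

	"github.com/rs/zerolog"

	// NOTE: placeholder module paths; substitute the project's real ones.
	"example.com/registry/pkg/config"  // Config / StorageConfig / NewConfig from the diff below
	"example.com/registry/pkg/log"     // project logger wrapper, as used in the tests below
	"example.com/registry/pkg/storage" // NewImageStore from the diff below
)

func main() {
	cfg := config.NewConfig() // defaults now include Storage: StorageConfig{GC: true, Dedupe: true}

	// For dist-spec conformance runs, both optimizations can be switched off.
	cfg.Storage.RootDirectory = "/tmp/registry" // example value, not a project default
	cfg.Storage.GC = false
	cfg.Storage.Dedupe = false

	is := storage.NewImageStore(cfg.Storage.RootDirectory, cfg.Storage.GC, cfg.Storage.Dedupe,
		log.Logger{Logger: zerolog.New(os.Stdout)})
	if is == nil {
		// we can't proceed without at least an image store
		os.Exit(1)
	}
}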
@@ -12,6 +12,8 @@ var Commit string
 
 type StorageConfig struct {
 	RootDirectory string
+	GC            bool
+	Dedupe        bool
 }
 
 type TLSConfig struct {

@@ -77,6 +79,7 @@ func NewConfig() *Config {
 	return &Config{
 		Version: dspec.Version,
 		Commit:  Commit,
+		Storage: StorageConfig{GC: true, Dedupe: true},
 		HTTP:    HTTPConfig{Address: "127.0.0.1", Port: "8080"},
 		Log:     &LogConfig{Level: "debug"},
 	}

@@ -42,7 +42,8 @@ func (c *Controller) Run() error {
 	engine.Use(log.SessionLogger(c.Log), handlers.RecoveryHandler(handlers.RecoveryLogger(c.Log),
 		handlers.PrintRecoveryStack(false)))
 
-	c.ImageStore = storage.NewImageStore(c.Config.Storage.RootDirectory, c.Log)
+	c.ImageStore = storage.NewImageStore(c.Config.Storage.RootDirectory, c.Config.Storage.GC,
+		c.Config.Storage.Dedupe, c.Log)
 	if c.ImageStore == nil {
 		// we can't proceed without at least a image store
 		os.Exit(1)

@@ -38,11 +38,13 @@ type ImageStore struct {
 	lock        *sync.RWMutex
 	blobUploads map[string]BlobUpload
 	cache       *Cache
+	gc          bool
+	dedupe      bool
 	log         zerolog.Logger
 }
 
 // NewImageStore returns a new image store backed by a file storage.
-func NewImageStore(rootDir string, log zlog.Logger) *ImageStore {
+func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger) *ImageStore {
 	if _, err := os.Stat(rootDir); os.IsNotExist(err) {
 		if err := os.MkdirAll(rootDir, 0700); err != nil {
 			log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir")

@@ -54,10 +56,15 @@ func NewImageStore(rootDir string, log zlog.Logger) *ImageStore {
 		rootDir:     rootDir,
 		lock:        &sync.RWMutex{},
 		blobUploads: make(map[string]BlobUpload),
-		cache:       NewCache(rootDir, "cache", log),
+		gc:          gc,
+		dedupe:      dedupe,
 		log:         log.With().Caller().Logger(),
 	}
 
+	if dedupe {
+		is.cache = NewCache(rootDir, "cache", log)
+	}
+
 	return is
 }

@@ -467,14 +474,16 @@ func (is *ImageStore) PutImageManifest(repo string, reference string, mediaType
 		return "", err
 	}
 
-	oci, err := umoci.OpenLayout(dir)
-	if err != nil {
-		return "", err
-	}
-	defer oci.Close()
+	if is.gc {
+		oci, err := umoci.OpenLayout(dir)
+		if err != nil {
+			return "", err
+		}
+		defer oci.Close()
 
-	if err := oci.GC(context.Background()); err != nil {
-		return "", err
+		if err := oci.GC(context.Background()); err != nil {
+			return "", err
+		}
 	}
 
 	return desc.Digest.String(), nil

@@ -541,14 +550,16 @@ func (is *ImageStore) DeleteImageManifest(repo string, reference string) error {
 		return err
 	}
 
-	oci, err := umoci.OpenLayout(dir)
-	if err != nil {
-		return err
-	}
-	defer oci.Close()
+	if is.gc {
+		oci, err := umoci.OpenLayout(dir)
+		if err != nil {
+			return err
+		}
+		defer oci.Close()
 
-	if err := oci.GC(context.Background()); err != nil {
-		return err
+		if err := oci.GC(context.Background()); err != nil {
+			return err
+		}
 	}
 
 	p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())

@@ -733,15 +744,21 @@ func (is *ImageStore) FinishBlobUpload(repo string, uuid string, body io.Reader,
 	ensureDir(dir, is.log)
 	dst := is.BlobPath(repo, dstDigest)
 
-	if is.cache != nil {
+	if is.dedupe && is.cache != nil {
 		if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
 			is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
 				Str("dst", dst).Msg("unable to dedupe blob")
 			return err
 		}
+	} else {
+		if err := os.Rename(src, dst); err != nil {
+			is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
+				Str("dst", dst).Msg("unable to finish blob")
+			return err
+		}
 	}
 
-	return err
+	return nil
 }
 
 // FullBlobUpload handles a full blob upload, and no partial session is created

@@ -792,15 +809,21 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, digest string)
 	ensureDir(dir, is.log)
 	dst := is.BlobPath(repo, dstDigest)
 
-	if is.cache != nil {
+	if is.dedupe && is.cache != nil {
 		if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
 			is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
 				Str("dst", dst).Msg("unable to dedupe blob")
 			return "", -1, err
 		}
+	} else {
+		if err := os.Rename(src, dst); err != nil {
+			is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
+				Str("dst", dst).Msg("unable to finish blob")
+			return "", -1, err
+		}
 	}
 
-	return uuid, n, err
+	return uuid, n, nil
 }
 
 // nolint (interfacer)

@@ -27,7 +27,7 @@ func TestAPIs(t *testing.T) {
 
 	defer os.RemoveAll(dir)
 
-	il := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
+	il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
 
 	Convey("Repo layout", t, func(c C) {
 		repoName := "test"

@@ -404,7 +404,7 @@ func TestDedupe(t *testing.T) {
 		}
 		defer os.RemoveAll(dir)
 
-		is := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
+		is := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
 
 		So(is.DedupeBlob("", "", ""), ShouldNotBeNil)
 	})

@@ -419,8 +419,8 @@ func TestNegativeCases(t *testing.T) {
 		}
 		os.RemoveAll(dir)
 
-		So(storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldNotBeNil)
-		So(storage.NewImageStore("/deadBEEF", log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldBeNil)
+		So(storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldNotBeNil)
+		So(storage.NewImageStore("/deadBEEF", true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldBeNil)
 	})
 
 	Convey("Invalid init repo", t, func(c C) {

@@ -429,7 +429,7 @@ func TestNegativeCases(t *testing.T) {
 			panic(err)
 		}
 		defer os.RemoveAll(dir)
-		il := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
+		il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
 		err = os.Chmod(dir, 0000) // remove all perms
 		So(err, ShouldBeNil)
 		So(func() { _ = il.InitRepo("test") }, ShouldPanic)

@@ -441,7 +441,7 @@ func TestNegativeCases(t *testing.T) {
 			panic(err)
 		}
 		defer os.RemoveAll(dir)
-		il := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
+		il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
 		So(il, ShouldNotBeNil)
 		So(il.InitRepo("test"), ShouldBeNil)
 		files, err := ioutil.ReadDir(path.Join(dir, "test"))

@@ -472,7 +472,7 @@ func TestNegativeCases(t *testing.T) {
 			panic(err)
 		}
 		defer os.RemoveAll(dir)
-		il = storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
+		il = storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
 		So(il, ShouldNotBeNil)
 		So(il.InitRepo("test"), ShouldBeNil)
 		So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)

@@ -495,7 +495,7 @@ func TestNegativeCases(t *testing.T) {
 			panic(err)
 		}
 		defer os.RemoveAll(dir)
-		il = storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
+		il = storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
 		So(il, ShouldNotBeNil)
 		So(il.InitRepo("test"), ShouldBeNil)
 		So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)

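The test updates above only exercise the new defaults (gc and dedupe both enabled). A complementary case with both optimizations disabled could look like the sketch below; the test name and temp-dir setup are illustrative and not part of this commit.

func TestNoGCNoDedupe(t *testing.T) {
	dir, err := ioutil.TempDir("", "oci-repo-test")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// gc=false, dedupe=false: no dedupe cache is created, blobs are finished
	// with a plain rename, and manifest writes/deletes skip the umoci GC pass.
	il := storage.NewImageStore(dir, false, false, log.Logger{Logger: zerolog.New(os.Stdout)})

	Convey("Store works with optimizations disabled", t, func(c C) {
		So(il, ShouldNotBeNil)
		So(il.InitRepo("test"), ShouldBeNil)
	})
}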