gc: add a gcDelay param
Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>
commit 38a110314b (parent 35eeedb22a)

examples/config-gc.json (new file, +16)
@ -0,0 +1,16 @@
{
    "version": "0.1.0-dev",
    "storage": {
        "rootDirectory": "/tmp/zot",
        "gc": true,
        "gcDelay": "1s"
    },
    "http": {
        "address": "127.0.0.1",
        "port": "8080",
        "ReadOnly": false
    },
    "log": {
        "level": "debug"
    }
}
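The new gcDelay setting takes a Go-style duration string ("300ms", "1s", "2h45m", ...). A standalone sketch of the accepted syntax, not part of the commit itself:

package main

import (
    "fmt"
    "time"
)

func main() {
    // "1s" is the value used by examples/config-gc.json; any Go duration
    // string such as "30m" or "2h45m" works the same way.
    delay, err := time.ParseDuration("1s")
    if err != nil {
        panic(err)
    }
    fmt.Println(delay) // 1s
}

The example config is then served the usual way, typically zot serve examples/config-gc.json.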
@ -2,6 +2,7 @@ package config

import (
    "fmt"
    "time"

    "github.com/getlantern/deepcopy"
    distspec "github.com/opencontainers/distribution-spec/specs-go"
@ -9,6 +10,7 @@ import (
    "zotregistry.io/zot/errors"
    extconf "zotregistry.io/zot/pkg/extensions/config"
    "zotregistry.io/zot/pkg/log"
    "zotregistry.io/zot/pkg/storage"
)

var (
@ -22,6 +24,7 @@ type StorageConfig struct {
    GC            bool
    Dedupe        bool
    Commit        bool
    GCDelay       time.Duration
    StorageDriver map[string]interface{} `mapstructure:",omitempty"`
}

@ -94,6 +97,7 @@ type GlobalStorageConfig struct {
    Dedupe        bool
    GC            bool
    Commit        bool
    GCDelay       time.Duration
    RootDirectory string
    StorageDriver map[string]interface{} `mapstructure:",omitempty"`
    SubPaths      map[string]StorageConfig
@ -134,7 +138,7 @@ func New() *Config {
        GoVersion:  GoVersion,
        Commit:     Commit,
        BinaryType: BinaryType,
        Storage:    GlobalStorageConfig{GC: true, Dedupe: true},
        Storage:    GlobalStorageConfig{GC: true, GCDelay: storage.DefaultGCDelay, Dedupe: true},
        HTTP:       HTTPConfig{Address: "127.0.0.1", Port: "8080"},
        Log:        &LogConfig{Level: "debug"},
    }
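For context on how the string "1s" from the JSON example lands in the new GCDelay time.Duration field: zot's CLI loads JSON config through viper, which decodes into structs via mapstructure, and duration strings rely on a string-to-duration decode hook. The exact wiring inside zot is not shown in this diff, so the snippet below is only an illustrative, self-contained sketch of that decoding step.

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/mitchellh/mapstructure"
)

// Mirrors the shape of the new field: a duration carried in config.
type storageConfig struct {
    GC      bool
    GCDelay time.Duration
}

func main() {
    // Raw JSON as it appears in examples/config-gc.json.
    raw := []byte(`{"gc": true, "gcDelay": "1s"}`)

    var m map[string]interface{}
    if err := json.Unmarshal(raw, &m); err != nil {
        panic(err)
    }

    var cfg storageConfig
    dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
        // Converts "1s" into time.Second when decoding into GCDelay.
        DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
        Result:     &cfg,
    })
    if err != nil {
        panic(err)
    }
    if err := dec.Decode(m); err != nil {
        panic(err)
    }

    fmt.Println(cfg.GCDelay) // 1s
}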
@ -213,7 +213,7 @@ func (c *Controller) InitImageStore() error {
    var defaultStore storage.ImageStore
    if len(c.Config.Storage.StorageDriver) == 0 {
        defaultStore = storage.NewImageStore(c.Config.Storage.RootDirectory,
            c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Config.Storage.Commit, c.Log, c.Metrics)
            c.Config.Storage.GC, c.Config.Storage.GCDelay, c.Config.Storage.Dedupe, c.Config.Storage.Commit, c.Log, c.Metrics)
    } else {
        storeName := fmt.Sprintf("%v", c.Config.Storage.StorageDriver["name"])
        if storeName != storage.S3StorageDriverName {
@ -229,7 +229,8 @@ func (c *Controller) InitImageStore() error {
        }

        defaultStore = s3.NewImageStore(c.Config.Storage.RootDirectory,
            c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Config.Storage.Commit, c.Log, c.Metrics, store)
            c.Config.Storage.GC, c.Config.Storage.GCDelay, c.Config.Storage.Dedupe,
            c.Config.Storage.Commit, c.Log, c.Metrics, store)
    }

    c.StoreController.DefaultStore = defaultStore
@ -265,7 +266,7 @@ func (c *Controller) InitImageStore() error {

        if len(storageConfig.StorageDriver) == 0 {
            subImageStore[route] = storage.NewImageStore(storageConfig.RootDirectory,
                storageConfig.GC, storageConfig.Dedupe, storageConfig.Commit, c.Log, c.Metrics)
                storageConfig.GC, storageConfig.GCDelay, storageConfig.Dedupe, storageConfig.Commit, c.Log, c.Metrics)
        } else {
            storeName := fmt.Sprintf("%v", storageConfig.StorageDriver["name"])
            if storeName != storage.S3StorageDriverName {
@ -281,7 +282,7 @@ func (c *Controller) InitImageStore() error {
            }

            subImageStore[route] = s3.NewImageStore(storageConfig.RootDirectory,
                storageConfig.GC, storageConfig.Dedupe, storageConfig.Commit, c.Log, c.Metrics, store)
                storageConfig.GC, storageConfig.GCDelay, storageConfig.Dedupe, storageConfig.Commit, c.Log, c.Metrics, store)
        }

    // Enable extensions if extension config is provided
@ -221,6 +221,18 @@ func NewCliRootCmd() *cobra.Command {
}

func validateConfiguration(config *config.Config) {
    // enforce GC params
    if config.Storage.GCDelay < 0 {
        log.Error().Err(errors.ErrBadConfig).
            Msgf("invalid garbage-collect delay %v specified", config.Storage.GCDelay)
        panic(errors.ErrBadConfig)
    }

    if !config.Storage.GC && config.Storage.GCDelay != 0 {
        log.Warn().Err(errors.ErrBadConfig).
            Msg("garbage-collect delay specified without enabling garbage-collect, will be ignored")
    }

    // check authorization config, it should have basic auth enabled or ldap
    if config.HTTP.RawAccessControl != nil {
        if config.HTTP.Auth == nil || (config.HTTP.Auth.HTPasswd.Path == "" && config.HTTP.Auth.LDAP == nil) {
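Restating the two new guards outside zot, since validateConfiguration itself is unexported; the real code logs through zerolog and panics with errors.ErrBadConfig, while this is only an illustrative standalone program:

package main

import (
    "fmt"
    "time"
)

func main() {
    // gcDelay set while gc is disabled: accepted, but flagged as ignored.
    gc, gcDelay := false, 1*time.Second

    if gcDelay < 0 {
        // A negative delay is rejected outright; zot panics with ErrBadConfig here.
        panic(fmt.Sprintf("invalid garbage-collect delay %v specified", gcDelay))
    }

    if !gc && gcDelay != 0 {
        fmt.Println("garbage-collect delay specified without enabling garbage-collect, will be ignored")
    }
}

The CLI tests added below exercise both branches by rewriting examples/config-gc.json and reloading it.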
@ -2,6 +2,7 @@ package cli_test

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
@ -14,6 +15,7 @@ import (
    "zotregistry.io/zot/pkg/api"
    "zotregistry.io/zot/pkg/api/config"
    "zotregistry.io/zot/pkg/cli"
    "zotregistry.io/zot/pkg/storage"
    . "zotregistry.io/zot/pkg/test"
)

@ -300,6 +302,55 @@ func TestLoadConfig(t *testing.T) {
    })
}

func TestGC(t *testing.T) {
    Convey("Test GC config", t, func(c C) {
        config := config.New()
        So(func() { cli.LoadConfiguration(config, "../../examples/config-multiple.json") }, ShouldNotPanic)
        So(config.Storage.GCDelay, ShouldEqual, storage.DefaultGCDelay)
        So(func() { cli.LoadConfiguration(config, "../../examples/config-gc.json") }, ShouldNotPanic)
        So(config.Storage.GCDelay, ShouldNotEqual, storage.DefaultGCDelay)
    })

    Convey("Test GC config corner cases", t, func(c C) {
        contents, err := ioutil.ReadFile("../../examples/config-gc.json")
        So(err, ShouldBeNil)

        Convey("GC delay without GC", func() {
            config := config.New()
            err = json.Unmarshal(contents, config)
            config.Storage.GC = false

            file, err := ioutil.TempFile("", "gc-config-*.json")
            So(err, ShouldBeNil)
            defer os.Remove(file.Name())

            contents, err = json.MarshalIndent(config, "", " ")
            So(err, ShouldBeNil)

            err = ioutil.WriteFile(file.Name(), contents, 0o600)
            So(err, ShouldBeNil)
            So(func() { cli.LoadConfiguration(config, file.Name()) }, ShouldNotPanic)
        })

        Convey("Negative GC delay", func() {
            config := config.New()
            err = json.Unmarshal(contents, config)
            config.Storage.GCDelay = -1 * time.Second

            file, err := ioutil.TempFile("", "gc-config-*.json")
            So(err, ShouldBeNil)
            defer os.Remove(file.Name())

            contents, err = json.MarshalIndent(config, "", " ")
            So(err, ShouldBeNil)

            err = ioutil.WriteFile(file.Name(), contents, 0o600)
            So(err, ShouldBeNil)
            So(func() { cli.LoadConfiguration(config, file.Name()) }, ShouldPanic)
        })
    })
}

func TestScrub(t *testing.T) {
    oldArgs := os.Args
@ -132,7 +132,7 @@ func TestImageFormat(t *testing.T) {
    dbDir := "../../../../test/data"

    metrics := monitoring.NewMetricsServer(false, log)
    defaultStore := storage.NewImageStore(dbDir, false, false, false, log, metrics)
    defaultStore := storage.NewImageStore(dbDir, false, storage.DefaultGCDelay, false, false, log, metrics)
    storeController := storage.StoreController{DefaultStore: defaultStore}
    olu := common.NewOciLayoutUtils(storeController, log)

@ -525,9 +525,9 @@ func TestUtilsMethod(t *testing.T) {
    defer os.RemoveAll(subRootDir)

    metrics := monitoring.NewMetricsServer(false, log)
    defaultStore := storage.NewImageStore(rootDir, false, false, false, log, metrics)
    defaultStore := storage.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics)

    subStore := storage.NewImageStore(subRootDir, false, false, false, log, metrics)
    subStore := storage.NewImageStore(subRootDir, false, storage.DefaultGCDelay, false, false, log, metrics)

    subStoreMap := make(map[string]storage.ImageStore)
@ -90,7 +90,7 @@ func testSetup() error {
    log := log.NewLogger("debug", "")
    metrics := monitoring.NewMetricsServer(false, log)

    storeController := storage.StoreController{DefaultStore: storage.NewImageStore(dir, false, false, false, log, metrics)}
    storeController := storage.StoreController{DefaultStore: storage.NewImageStore(dir, false, storage.DefaultGCDelay, false, false, log, metrics)}

    layoutUtils := common.NewOciLayoutUtils(storeController, log)
@ -347,11 +347,11 @@ func TestMultipleStoragePath(t *testing.T) {
    metrics := monitoring.NewMetricsServer(false, log)

    // Create ImageStore
    firstStore := storage.NewImageStore(firstRootDir, false, false, false, log, metrics)
    firstStore := storage.NewImageStore(firstRootDir, false, storage.DefaultGCDelay, false, false, log, metrics)

    secondStore := storage.NewImageStore(secondRootDir, false, false, false, log, metrics)
    secondStore := storage.NewImageStore(secondRootDir, false, storage.DefaultGCDelay, false, false, log, metrics)

    thirdStore := storage.NewImageStore(thirdRootDir, false, false, false, log, metrics)
    thirdStore := storage.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay, false, false, log, metrics)

    storeController := storage.StoreController{}
@ -98,7 +98,7 @@ func testSetup() error {
    log := log.NewLogger("debug", "")
    metrics := monitoring.NewMetricsServer(false, log)
    storeController := storage.StoreController{
        DefaultStore: storage.NewImageStore(rootDir, false, false, false, log, metrics),
        DefaultStore: storage.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics),
    }

    digestInfo = digestinfo.NewDigestInfo(storeController, log)
@ -245,7 +245,7 @@ func TestSyncInternal(t *testing.T) {
    log := log.Logger{Logger: zerolog.New(os.Stdout)}
    metrics := monitoring.NewMetricsServer(false, log)

    imageStore := storage.NewImageStore(storageDir, false, false, false, log, metrics)
    imageStore := storage.NewImageStore(storageDir, false, storage.DefaultGCDelay, false, false, log, metrics)

    repoRefStr := fmt.Sprintf("%s/%s", host, testImage)
    repoRef, err := parseRepositoryReference(repoRefStr)
@ -313,7 +313,7 @@ func TestSyncInternal(t *testing.T) {
    log := log.Logger{Logger: zerolog.New(os.Stdout)}
    metrics := monitoring.NewMetricsServer(false, log)

    imageStore := storage.NewImageStore(storageDir, false, false, false, log, metrics)
    imageStore := storage.NewImageStore(storageDir, false, storage.DefaultGCDelay, false, false, log, metrics)

    storeController := storage.StoreController{}
    storeController.DefaultStore = imageStore
@ -334,7 +334,7 @@ func TestSyncInternal(t *testing.T) {
        panic(err)
    }

    testImageStore := storage.NewImageStore(testRootDir, false, false, false, log, metrics)
    testImageStore := storage.NewImageStore(testRootDir, false, storage.DefaultGCDelay, false, false, log, metrics)
    manifestContent, _, _, err := testImageStore.GetImageManifest(testImage, testImageTag)
    So(err, ShouldBeNil)
@ -37,6 +37,7 @@ import (
    "zotregistry.io/zot/pkg/api/config"
    extconf "zotregistry.io/zot/pkg/extensions/config"
    "zotregistry.io/zot/pkg/extensions/sync"
    "zotregistry.io/zot/pkg/storage"
    "zotregistry.io/zot/pkg/test"
)

@ -1017,6 +1018,7 @@ func TestBasicAuth(t *testing.T) {
        "a": {
            RootDirectory: destDir,
            GC:            true,
            GCDelay:       storage.DefaultGCDelay,
            Dedupe:        true,
        },
    }
@ -1770,6 +1772,7 @@ func TestSubPaths(t *testing.T) {
        subpath: {
            RootDirectory: subPathDestDir,
            GC:            true,
            GCDelay:       storage.DefaultGCDelay,
            Dedupe:        true,
        },
    }
@ -444,7 +444,7 @@ func pushSyncedLocalImage(repo, tag, localCachePath string,
    imageStore := storeController.GetImageStore(repo)

    metrics := monitoring.NewMetricsServer(false, log)
    cacheImageStore := storage.NewImageStore(localCachePath, false, false, false, log, metrics)
    cacheImageStore := storage.NewImageStore(localCachePath, false, storage.DefaultGCDelay, false, false, log, metrics)

    manifestContent, _, _, err := cacheImageStore.GetImageManifest(repo, tag)
    if err != nil {
@ -53,7 +53,7 @@ func skipIt(t *testing.T) {
func createMockStorage(rootDir string, store driver.StorageDriver) storage.ImageStore {
    log := log.Logger{Logger: zerolog.New(os.Stdout)}
    metrics := monitoring.NewMetricsServer(false, log)
    il := s3.NewImageStore(rootDir, false, false, false, log, metrics, store)
    il := s3.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, store)

    return il
}
@ -86,7 +86,7 @@ func createObjectsStore(rootDir string) (driver.StorageDriver, storage.ImageStor

    log := log.Logger{Logger: zerolog.New(os.Stdout)}
    metrics := monitoring.NewMetricsServer(false, log)
    il := s3.NewImageStore(rootDir, false, false, false, log, metrics, store)
    il := s3.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, store)

    return store, il, err
}
@ -63,7 +63,8 @@ func (is *ObjectStorage) DirExists(d string) bool {

// NewObjectStorage returns a new image store backed by cloud storages.
// see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
func NewImageStore(rootDir string, gc, dedupe, commit bool, log zlog.Logger, metrics monitoring.MetricServer,
func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commit bool,
    log zlog.Logger, metrics monitoring.MetricServer,
    store driver.StorageDriver) storage.ImageStore {
    imgStore := &ObjectStorage{
        rootDir: rootDir,
@ -36,7 +36,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {

    metrics := monitoring.NewMetricsServer(false, log)

    imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
    imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

    Convey("Scrub only one repo", t, func(c C) {
        // initialize repo
@ -10,6 +10,7 @@ import (

const (
    S3StorageDriverName = "s3"
    DefaultGCDelay      = 1 * time.Hour
)

type ImageStore interface {
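DefaultGCDelay is the fallback grace period: config.New() seeds Storage.GCDelay with it (see the config change above), so a registry config that never mentions gcDelay still waits an hour before unreferenced content becomes a GC candidate. A tiny illustrative program, not part of the commit:

package main

import (
    "fmt"

    "zotregistry.io/zot/pkg/api/config"
    "zotregistry.io/zot/pkg/storage"
)

func main() {
    cfg := config.New()
    // True until a config file such as examples/config-gc.json overrides it.
    fmt.Println(cfg.Storage.GCDelay == storage.DefaultGCDelay)
}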
@ -36,7 +36,6 @@ const (
    // BlobUploadDir defines the upload directory for blob uploads.
    BlobUploadDir    = ".uploads"
    SchemaVersion    = 2
    gcDelay          = 1 * time.Hour
    DefaultFilePerms = 0o600
    DefaultDirPerms  = 0o700
    RLOCK            = "RLock"
@ -63,6 +62,7 @@ type ImageStoreFS struct {
    gc      bool
    dedupe  bool
    commit  bool
    gcDelay time.Duration
    log     zerolog.Logger
    metrics monitoring.MetricServer
}
@ -105,7 +105,7 @@ func (sc StoreController) GetImageStore(name string) ImageStore {
}

// NewImageStore returns a new image store backed by a file storage.
func NewImageStore(rootDir string, gc, dedupe, commit bool,
func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commit bool,
    log zlog.Logger, metrics monitoring.MetricServer) ImageStore {
    if _, err := os.Stat(rootDir); os.IsNotExist(err) {
        if err := os.MkdirAll(rootDir, DefaultDirPerms); err != nil {
@ -120,6 +120,7 @@ func NewImageStore(rootDir string, gc, dedupe, commit bool,
        lock:        &sync.RWMutex{},
        blobUploads: make(map[string]BlobUpload),
        gc:          gc,
        gcDelay:     gcDelay,
        dedupe:      dedupe,
        commit:      commit,
        log:         log.With().Caller().Logger(),
@ -695,7 +696,7 @@ func (is *ImageStoreFS) PutImageManifest(repo string, reference string, mediaTyp
        }
        defer oci.Close()

        if err := oci.GC(context.Background(), ifOlderThan(is, repo, gcDelay)); err != nil {
        if err := oci.GC(context.Background(), ifOlderThan(is, repo, is.gcDelay)); err != nil {
            return "", err
        }
    }
@ -796,7 +797,7 @@ func (is *ImageStoreFS) DeleteImageManifest(repo string, reference string) error
        }
        defer oci.Close()

        if err := oci.GC(context.Background(), ifOlderThan(is, repo, gcDelay)); err != nil {
        if err := oci.GC(context.Background(), ifOlderThan(is, repo, is.gcDelay)); err != nil {
            return err
        }
    }
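Both GC call sites now pass the per-store delay instead of the removed package-level constant. The ifOlderThan helper itself is not part of this diff; the sketch below is a hypothetical stand-in for the idea it implements, namely that a blob only becomes a GC candidate once it has been around longer than the configured delay:

package main

import (
    "fmt"
    "time"
)

// olderThan is a hypothetical stand-in for the predicate behind ifOlderThan
// (whose body is not shown in this commit): compare a blob's modification
// time against the configured grace period.
func olderThan(modTime time.Time, gcDelay time.Duration) bool {
    return time.Since(modTime) >= gcDelay
}

func main() {
    created := time.Now().Add(-30 * time.Minute)
    fmt.Println(olderThan(created, time.Hour))     // false: still inside the 1h grace period
    fmt.Println(olderThan(created, 1*time.Second)) // true: a short delay exposes it to GC
}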
@ -1142,7 +1143,7 @@ func (is *ImageStoreFS) FullBlobUpload(repo string, body io.Reader, digest strin

func (is *ImageStoreFS) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error {
retry:
    is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: ENTER")
    is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: enter")

    dstRecord, err := is.cache.GetBlob(dstDigest.String())
@ -12,6 +12,7 @@ import (
    "path"
    "strings"
    "testing"
    "time"

    godigest "github.com/opencontainers/go-digest"
    ispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -24,6 +25,10 @@ import (
    "zotregistry.io/zot/pkg/test"
)

const (
    tag = "1.0"
)

func TestStorageFSAPIs(t *testing.T) {
    dir, err := ioutil.TempDir("", "oci-repo-test")
    if err != nil {
@ -34,7 +39,7 @@ func TestStorageFSAPIs(t *testing.T) {

    log := log.Logger{Logger: zerolog.New(os.Stdout)}
    metrics := monitoring.NewMetricsServer(false, log)
    imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
    imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

    Convey("Repo layout", t, func(c C) {
        repoName := "test"
@ -57,7 +62,7 @@ func TestStorageFSAPIs(t *testing.T) {
        So(err, ShouldBeNil)

        annotationsMap := make(map[string]string)
        annotationsMap[ispec.AnnotationRefName] = "1.0"
        annotationsMap[ispec.AnnotationRefName] = tag

        cblob, cdigest := test.GetRandomImageConfig()
        _, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest.String())
@ -171,7 +176,7 @@ func TestDedupeLinks(t *testing.T) {

    log := log.Logger{Logger: zerolog.New(os.Stdout)}
    metrics := monitoring.NewMetricsServer(false, log)
    imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
    imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

    Convey("Dedupe", t, func(c C) {
        // manifest1
@ -311,7 +316,7 @@ func TestDedupe(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        il := storage.NewImageStore(dir, true, true, true, log, metrics)
        il := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

        So(il.DedupeBlob("", "", ""), ShouldNotBeNil)
    })
@ -330,9 +335,9 @@ func TestNegativeCases(t *testing.T) {
        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)

        So(storage.NewImageStore(dir, true, true, true, log, metrics), ShouldNotBeNil)
        So(storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics), ShouldNotBeNil)
        if os.Geteuid() != 0 {
            So(storage.NewImageStore("/deadBEEF", true, true, true, log, metrics), ShouldBeNil)
            So(storage.NewImageStore("/deadBEEF", true, storage.DefaultGCDelay, true, true, log, metrics), ShouldBeNil)
        }
    })

@ -345,7 +350,7 @@ func TestNegativeCases(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
        imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

        err = os.Chmod(dir, 0o000) // remove all perms
        if err != nil {
@ -384,7 +389,7 @@ func TestNegativeCases(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
        imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

        So(imgStore, ShouldNotBeNil)
        So(imgStore.InitRepo("test"), ShouldBeNil)
@ -502,7 +507,7 @@ func TestNegativeCases(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
        imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

        So(imgStore, ShouldNotBeNil)
        So(imgStore.InitRepo("test"), ShouldBeNil)
@ -529,7 +534,7 @@ func TestNegativeCases(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
        imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

        So(imgStore, ShouldNotBeNil)
        So(imgStore.InitRepo("test"), ShouldBeNil)
@ -574,7 +579,7 @@ func TestNegativeCases(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
        imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

        So(imgStore, ShouldNotBeNil)
        So(imgStore.InitRepo("test"), ShouldBeNil)
@ -636,7 +641,7 @@ func TestNegativeCases(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
        imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

        upload, err := imgStore.NewBlobUpload("dedupe1")
        So(err, ShouldBeNil)
@ -796,7 +801,7 @@ func TestWriteFile(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore := storage.NewImageStore(dir, true, true, true, log, metrics)
        imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)

        Convey("Failure path1", func() {
            injected := test.InjectFailure(0)
@ -828,7 +833,7 @@ func TestWriteFile(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore := storage.NewImageStore(dir, true, true, false, log, metrics)
        imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, false, log, metrics)

        Convey("Failure path not reached", func() {
            err := imgStore.InitRepo("repo1")
@ -837,6 +842,180 @@ func TestWriteFile(t *testing.T) {
    })
}

func TestGarbageCollect(t *testing.T) {
    Convey("Repo layout", t, func(c C) {
        dir, err := ioutil.TempDir("", "oci-gc-test")
        if err != nil {
            panic(err)
        }

        defer os.RemoveAll(dir)

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)

        Convey("Garbage collect with default/long delay", func() {
            imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)
            repoName := "gc-long"

            upload, err := imgStore.NewBlobUpload(repoName)
            So(err, ShouldBeNil)
            So(upload, ShouldNotBeEmpty)

            content := []byte("test-data1")
            buf := bytes.NewBuffer(content)
            buflen := buf.Len()
            bdigest := godigest.FromBytes(content)

            blob, err := imgStore.PutBlobChunk(repoName, upload, 0, int64(buflen), buf)
            So(err, ShouldBeNil)
            So(blob, ShouldEqual, buflen)

            err = imgStore.FinishBlobUpload(repoName, upload, buf, bdigest.String())
            So(err, ShouldBeNil)

            annotationsMap := make(map[string]string)
            annotationsMap[ispec.AnnotationRefName] = tag

            cblob, cdigest := test.GetRandomImageConfig()
            _, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest.String())
            So(err, ShouldBeNil)
            So(clen, ShouldEqual, len(cblob))
            hasBlob, _, err := imgStore.CheckBlob(repoName, cdigest.String())
            So(err, ShouldBeNil)
            So(hasBlob, ShouldEqual, true)

            manifest := ispec.Manifest{
                Config: ispec.Descriptor{
                    MediaType: "application/vnd.oci.image.config.v1+json",
                    Digest:    cdigest,
                    Size:      int64(len(cblob)),
                },
                Layers: []ispec.Descriptor{
                    {
                        MediaType: "application/vnd.oci.image.layer.v1.tar",
                        Digest:    bdigest,
                        Size:      int64(buflen),
                    },
                },
                Annotations: annotationsMap,
            }

            manifest.SchemaVersion = 2
            manifestBuf, _ := json.Marshal(manifest)
            digest := godigest.FromBytes(manifestBuf)

            _, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, manifestBuf)
            So(err, ShouldBeNil)

            hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest.String())
            So(err, ShouldBeNil)
            So(hasBlob, ShouldEqual, true)

            err = imgStore.DeleteImageManifest(repoName, digest.String())
            So(err, ShouldBeNil)

            hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest.String())
            So(err, ShouldBeNil)
            So(hasBlob, ShouldEqual, true)
        })

        Convey("Garbage collect with short delay", func() {
            imgStore := storage.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics)
            repoName := "gc-short"

            // upload orphan blob
            upload, err := imgStore.NewBlobUpload(repoName)
            So(err, ShouldBeNil)
            So(upload, ShouldNotBeEmpty)

            content := []byte("test-data1")
            buf := bytes.NewBuffer(content)
            buflen := buf.Len()
            odigest := godigest.FromBytes(content)

            blob, err := imgStore.PutBlobChunk(repoName, upload, 0, int64(buflen), buf)
            So(err, ShouldBeNil)
            So(blob, ShouldEqual, buflen)

            err = imgStore.FinishBlobUpload(repoName, upload, buf, odigest.String())
            So(err, ShouldBeNil)

            // sleep so orphan blob can be GC'ed
            time.Sleep(5 * time.Second)

            // upload blob
            upload, err = imgStore.NewBlobUpload(repoName)
            So(err, ShouldBeNil)
            So(upload, ShouldNotBeEmpty)

            content = []byte("test-data2")
            buf = bytes.NewBuffer(content)
            buflen = buf.Len()
            bdigest := godigest.FromBytes(content)

            blob, err = imgStore.PutBlobChunk(repoName, upload, 0, int64(buflen), buf)
            So(err, ShouldBeNil)
            So(blob, ShouldEqual, buflen)

            err = imgStore.FinishBlobUpload(repoName, upload, buf, bdigest.String())
            So(err, ShouldBeNil)

            annotationsMap := make(map[string]string)
            annotationsMap[ispec.AnnotationRefName] = tag

            cblob, cdigest := test.GetRandomImageConfig()
            _, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest.String())
            So(err, ShouldBeNil)
            So(clen, ShouldEqual, len(cblob))
            hasBlob, _, err := imgStore.CheckBlob(repoName, cdigest.String())
            So(err, ShouldBeNil)
            So(hasBlob, ShouldEqual, true)

            manifest := ispec.Manifest{
                Config: ispec.Descriptor{
                    MediaType: "application/vnd.oci.image.config.v1+json",
                    Digest:    cdigest,
                    Size:      int64(len(cblob)),
                },
                Layers: []ispec.Descriptor{
                    {
                        MediaType: "application/vnd.oci.image.layer.v1.tar",
                        Digest:    bdigest,
                        Size:      int64(buflen),
                    },
                },
                Annotations: annotationsMap,
            }

            manifest.SchemaVersion = 2
            manifestBuf, _ := json.Marshal(manifest)
            digest := godigest.FromBytes(manifestBuf)

            _, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, manifestBuf)
            So(err, ShouldBeNil)

            hasBlob, _, err = imgStore.CheckBlob(repoName, odigest.String())
            So(err, ShouldNotBeNil)
            So(hasBlob, ShouldEqual, false)

            hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest.String())
            So(err, ShouldBeNil)
            So(hasBlob, ShouldEqual, true)

            // sleep so orphan blob can be GC'ed
            time.Sleep(5 * time.Second)

            err = imgStore.DeleteImageManifest(repoName, digest.String())
            So(err, ShouldBeNil)

            hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest.String())
            So(err, ShouldNotBeNil)
            So(hasBlob, ShouldEqual, false)
        })
    })
}

func randSeq(n int) string {
    letters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
@ -72,7 +72,7 @@ func createObjectsStore(rootDir string) (driver.StorageDriver, storage.ImageStor
    log := log.Logger{Logger: zerolog.New(os.Stdout)}
    metrics := monitoring.NewMetricsServer(false, log)

    il := s3.NewImageStore(rootDir, false, false, false, log, metrics, store)
    il := s3.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, store)

    return store, il, err
}
@ -120,7 +120,7 @@ func TestStorageAPIs(t *testing.T) {

        log := log.Logger{Logger: zerolog.New(os.Stdout)}
        metrics := monitoring.NewMetricsServer(false, log)
        imgStore = storage.NewImageStore(dir, true, true, true, log, metrics)
        imgStore = storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics)
    }

    Convey("Repo layout", t, func(c C) {
@ -711,11 +711,11 @@ func TestStorageHandler(t *testing.T) {
        metrics := monitoring.NewMetricsServer(false, log)

        // Create ImageStore
        firstStore = storage.NewImageStore(firstRootDir, false, false, false, log, metrics)
        firstStore = storage.NewImageStore(firstRootDir, false, storage.DefaultGCDelay, false, false, log, metrics)

        secondStore = storage.NewImageStore(secondRootDir, false, false, false, log, metrics)
        secondStore = storage.NewImageStore(secondRootDir, false, storage.DefaultGCDelay, false, false, log, metrics)

        thirdStore = storage.NewImageStore(thirdRootDir, false, false, false, log, metrics)
        thirdStore = storage.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay, false, false, log, metrics)
    }

    Convey("Test storage handler", t, func() {