fix(gc): sync repodb when gc'ing manifests (#1819)
fix(gc): deduped blobs were wrongly cleaned because they carry the modTime of the original blob; fixed by updating the modTime when hard linking the blobs.

fix(gc): zot failed to parse rootDir at startup when using S3 storage because there are no files under rootDir and empty directories cannot be created on S3; fixed by creating an empty file under rootDir.

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
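The dedupe fix hinges on refreshing the file time at link creation, so GC measures its delay from when the deduped blob appeared rather than from when the original was written. A minimal Go sketch of the idea, not zot's actual implementation (the helper name and paths are illustrative):

package main

import (
	"os"
	"time"
)

// dedupeBlob links dst to the already-stored src blob instead of copying it.
// Without the Chtimes call the link reports src's original modTime, so a GC
// pass could treat the freshly deduped blob as already past its delay window.
func dedupeBlob(src, dst string) error {
	if err := os.Link(src, dst); err != nil {
		return err
	}

	// Refresh the modification time so the GC delay is computed from the
	// moment the deduped blob was created.
	now := time.Now()

	return os.Chtimes(dst, now, now)
}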
parent 7c78f80a96
commit 1df743f173
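The S3 fix works around object stores having no notion of empty directories: before any image is pushed there is no object under rootDir, so listing it at startup fails. A sketch of the workaround against a generic object-store interface defined here purely for illustration (the interface and marker name are assumptions, not zot's actual API):

package main

import "context"

// objectStore captures only the two calls the workaround needs.
type objectStore interface {
	Stat(ctx context.Context, path string) (exists bool, err error)
	PutContent(ctx context.Context, path string, content []byte) error
}

// ensureRootDirExists writes a zero-byte marker object under rootDir so that
// S3 exposes rootDir when zot lists it at startup.
func ensureRootDirExists(ctx context.Context, store objectStore, rootDir string) error {
	const marker = "/.init" // hypothetical marker name

	if ok, _ := store.Stat(ctx, rootDir+marker); ok {
		return nil
	}

	return store.PutContent(ctx, rootDir+marker, []byte{})
}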
@@ -29,6 +29,7 @@ import (
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/scheduler"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/gc"
)

const (
@@ -338,7 +339,13 @@ func (c *Controller) StartBackgroundTasks(reloadCtx context.Context) {

// Enable running garbage-collect periodically for DefaultStore
if c.Config.Storage.GC {
c.StoreController.DefaultStore.RunGCPeriodically(c.Config.Storage.GCInterval, taskScheduler)
gc := gc.NewGarbageCollect(c.StoreController.DefaultStore, c.MetaDB, gc.Options{
Referrers: c.Config.Storage.GCReferrers,
Delay: c.Config.Storage.GCDelay,
RetentionDelay: c.Config.Storage.UntaggedImageRetentionDelay,
}, c.Log)

gc.CleanImageStorePeriodically(c.Config.Storage.GCInterval, taskScheduler)
}

// Enable running dedupe blobs both ways (dedupe or restore deduped blobs)
@@ -354,7 +361,14 @@ func (c *Controller) StartBackgroundTasks(reloadCtx context.Context) {
for route, storageConfig := range c.Config.Storage.SubPaths {
// Enable running garbage-collect periodically for subImageStore
if storageConfig.GC {
c.StoreController.SubStore[route].RunGCPeriodically(storageConfig.GCInterval, taskScheduler)
gc := gc.NewGarbageCollect(c.StoreController.SubStore[route], c.MetaDB,
gc.Options{
Referrers: storageConfig.GCReferrers,
Delay: storageConfig.GCDelay,
RetentionDelay: storageConfig.UntaggedImageRetentionDelay,
}, c.Log)

gc.CleanImageStorePeriodically(storageConfig.GCInterval, taskScheduler)
}

// Enable extensions if extension config is provided for subImageStore
@@ -60,6 +60,7 @@ import (
"zotregistry.io/zot/pkg/meta"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/gc"
"zotregistry.io/zot/pkg/test"
testc "zotregistry.io/zot/pkg/test/common"
. "zotregistry.io/zot/pkg/test/image-utils"
@@ -2338,11 +2339,9 @@ func TestBearerAuthWrongAuthorizer(t *testing.T) {
},
}
ctlr := makeController(conf, t.TempDir())
cm := test.NewControllerManager(ctlr)

So(func() {
ctx := context.Background()
cm.RunServer(ctx)
api.AuthHandler(ctlr)
}, ShouldPanic)
})
}
@@ -7665,7 +7664,7 @@ func TestInjectTooManyOpenFiles(t *testing.T) {
})
}

func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
Convey("Make controller", t, func() {
Convey("Garbage collect signatures without subject and manifests without tags", func(c C) {
repoName := "testrepo" //nolint:goconst
@@ -7676,6 +7675,16 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
conf := config.New()
conf.HTTP.Port = port

value := true
searchConfig := &extconf.SearchConfig{
BaseConfig: extconf.BaseConfig{Enable: &value},
}

// added search extensions so that metaDB is initialized and its tested in GC logic
conf.Extensions = &extconf.ExtensionConfig{
Search: searchConfig,
}

ctlr := makeController(conf, t.TempDir())

dir := t.TempDir()
@@ -7686,15 +7695,24 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {

ctlr.Config.Storage.Dedupe = false

err := test.WriteImageToFileSystem(CreateDefaultImage(), repoName, tag,
test.GetDefaultStoreController(dir, ctlr.Log))
So(err, ShouldBeNil)

cm := test.NewControllerManager(ctlr)
cm.StartServer()
cm.WaitServerToBeReady(port)
defer cm.StopServer()

img := CreateDefaultImage()

err := UploadImage(img, baseURL, repoName, tag)
So(err, ShouldBeNil)

gc := gc.NewGarbageCollect(ctlr.StoreController.DefaultStore, ctlr.MetaDB,
gc.Options{
Referrers: ctlr.Config.Storage.GCReferrers,
Delay: ctlr.Config.Storage.GCDelay,
RetentionDelay: ctlr.Config.Storage.UntaggedImageRetentionDelay,
},
ctlr.Log)

resp, err := resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, tag))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
@@ -7748,6 +7766,9 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)

cosignDigest := resp.Header().Get(constants.DistContentDigestKey)
So(cosignDigest, ShouldNotBeEmpty)

// get notation signature manifest
resp, err = resty.R().SetQueryParam("artifactType", notreg.ArtifactTypeNotation).Get(
fmt.Sprintf("%s/v2/%s/referrers/%s", baseURL, repoName, digest.String()))
@@ -7760,6 +7781,20 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 1)

// shouldn't do anything
err = gc.CleanRepo(repoName)
So(err, ShouldBeNil)

// make sure both signatures are stored in repodb
repoMeta, err := ctlr.MetaDB.GetRepoMeta(repoName)
So(err, ShouldBeNil)

sigMeta := repoMeta.Signatures[digest.String()]
So(len(sigMeta[storage.CosignType]), ShouldEqual, 1)
So(len(sigMeta[storage.NotationType]), ShouldEqual, 1)
So(sigMeta[storage.CosignType][0].SignatureManifestDigest, ShouldEqual, cosignDigest)
So(sigMeta[storage.NotationType][0].SignatureManifestDigest, ShouldEqual, index.Manifests[0].Digest.String())

Convey("Trigger gcNotationSignatures() error", func() {
var refs ispec.Index
err = json.Unmarshal(resp.Body(), &refs)
@@ -7773,7 +7808,7 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
err = UploadImage(img, baseURL, repoName, img.DigestStr())
So(err, ShouldBeNil)

err = ctlr.StoreController.DefaultStore.RunGCRepo(repoName)
err = gc.CleanRepo(repoName)
So(err, ShouldNotBeNil)

err = os.Chmod(path.Join(dir, repoName, "blobs", "sha256", refs.Manifests[0].Digest.Encoded()), 0o755)
@@ -7787,7 +7822,7 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
err = UploadImage(img, baseURL, repoName, tag)
So(err, ShouldBeNil)

err = ctlr.StoreController.DefaultStore.RunGCRepo(repoName)
err = gc.CleanRepo(repoName)
So(err, ShouldNotBeNil)

err = os.WriteFile(path.Join(dir, repoName, "blobs", "sha256", refs.Manifests[0].Digest.Encoded()), content, 0o600)
@@ -7811,6 +7846,17 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
}, baseURL, repoName, untaggedManifestDigest.String())
So(err, ShouldBeNil)

// make sure repoDB reference was added
repoMeta, err := ctlr.MetaDB.GetRepoMeta(repoName)
So(err, ShouldBeNil)

_, ok := repoMeta.Referrers[untaggedManifestDigest.String()]
So(ok, ShouldBeTrue)
_, ok = repoMeta.Signatures[untaggedManifestDigest.String()]
So(ok, ShouldBeTrue)
_, ok = repoMeta.Statistics[untaggedManifestDigest.String()]
So(ok, ShouldBeTrue)

// overwrite image so that signatures will get invalidated and gc'ed
cfg, layers, manifest, err = test.GetImageComponents(3) //nolint:staticcheck
So(err, ShouldBeNil)
@@ -7827,9 +7873,24 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
So(err, ShouldBeNil)
newManifestDigest := godigest.FromBytes(manifestBuf)

err = ctlr.StoreController.DefaultStore.RunGCRepo(repoName)
err = gc.CleanRepo(repoName)
So(err, ShouldBeNil)

// make sure both signatures are removed from repodb and repo reference for untagged is removed
repoMeta, err = ctlr.MetaDB.GetRepoMeta(repoName)
So(err, ShouldBeNil)

sigMeta := repoMeta.Signatures[digest.String()]
So(len(sigMeta[storage.CosignType]), ShouldEqual, 0)
So(len(sigMeta[storage.NotationType]), ShouldEqual, 0)

_, ok = repoMeta.Referrers[untaggedManifestDigest.String()]
So(ok, ShouldBeFalse)
_, ok = repoMeta.Signatures[untaggedManifestDigest.String()]
So(ok, ShouldBeFalse)
_, ok = repoMeta.Statistics[untaggedManifestDigest.String()]
So(ok, ShouldBeFalse)

// both signatures should be gc'ed
resp, err = resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, cosignTag))
So(err, ShouldBeNil)
@@ -7885,6 +7946,13 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
cm.StartAndWait(port)
defer cm.StopServer()

gc := gc.NewGarbageCollect(ctlr.StoreController.DefaultStore, ctlr.MetaDB,
gc.Options{
Referrers: ctlr.Config.Storage.GCReferrers,
Delay: ctlr.Config.Storage.GCDelay,
RetentionDelay: ctlr.Config.Storage.UntaggedImageRetentionDelay,
}, ctlr.Log)

resp, err := resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, tag))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
@@ -7934,7 +8002,7 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusCreated)

err = ctlr.StoreController.DefaultStore.RunGCRepo(repoName)
err = gc.CleanRepo(repoName)
So(err, ShouldBeNil)

resp, err = resty.R().SetHeader("Content-Type", ispec.MediaTypeImageIndex).
@@ -470,7 +470,7 @@ func TestNegativeServerResponse(t *testing.T) {

dir := t.TempDir()

imageStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(dir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)

storeController := storage.StoreController{
@@ -195,7 +195,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)

imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)

storeController := storage.StoreController{
@@ -315,7 +315,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)

imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)

storeController := storage.StoreController{
@@ -422,7 +422,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)

imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)

storeController := storage.StoreController{
@@ -584,7 +584,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)

imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)

storeController := storage.StoreController{
@@ -845,7 +845,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)

imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)

storeController := storage.StoreController{
@@ -492,7 +492,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
var index ispec.Index

linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
imgStore := local.NewImageStore(dir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)

indexContent, err := imgStore.GetIndexContent("zot-test")
@@ -524,7 +524,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
var index ispec.Index

linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
imgStore := local.NewImageStore(dir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)

indexContent, err := imgStore.GetIndexContent("zot-test")
@@ -594,7 +594,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)

linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
imgStore := local.NewImageStore(dir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)

pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
@@ -656,7 +656,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)

linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
imgStore := local.NewImageStore(dir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)

pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
@@ -720,7 +720,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)

linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
imgStore := local.NewImageStore(dir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)

pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
@@ -783,7 +783,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)

linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
imgStore := local.NewImageStore(dir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)

err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o000)
@@ -881,7 +881,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)

linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
imgStore := local.NewImageStore(dir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)

err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Encoded()), 0o000)
@@ -200,7 +200,7 @@ func TestRunScrubRepo(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, true, 1*time.Second, 1*time.Second, true,
imgStore := local.NewImageStore(dir, true,
true, log, metrics, nil, cacheDriver)

srcStorageCtlr := test.GetDefaultStoreController(dir, log)
@@ -236,7 +236,7 @@ func TestRunScrubRepo(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, true, 1*time.Second, 1*time.Second, true,
imgStore := local.NewImageStore(dir, true,
true, log, metrics, nil, cacheDriver)

srcStorageCtlr := test.GetDefaultStoreController(dir, log)
@@ -278,9 +278,7 @@ func TestRunScrubRepo(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, true, 1*time.Second,
1*time.Second, true, true, log, metrics, nil, cacheDriver,
)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)

srcStorageCtlr := test.GetDefaultStoreController(dir, log)
image := CreateDefaultVulnerableImage()
@@ -37,7 +37,6 @@ import (
"zotregistry.io/zot/pkg/meta/boltdb"
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/local"
. "zotregistry.io/zot/pkg/test"
. "zotregistry.io/zot/pkg/test/image-utils"
@@ -323,8 +322,7 @@ func TestImageFormat(t *testing.T) {
dbDir := t.TempDir()

metrics := monitoring.NewMetricsServer(false, log)
defaultStore := local.NewImageStore(imgDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
defaultStore := local.NewImageStore(imgDir, false, false, log, metrics, nil, nil)
storeController := storage.StoreController{DefaultStore: defaultStore}

params := boltdb.DBParameters{
@@ -24,7 +24,6 @@ import (
"zotregistry.io/zot/pkg/meta/boltdb"
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/imagestore"
"zotregistry.io/zot/pkg/storage/local"
storageTypes "zotregistry.io/zot/pkg/storage/types"
@@ -75,14 +74,11 @@ func TestMultipleStoragePath(t *testing.T) {

// Create ImageStore

firstStore := local.NewImageStore(firstRootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
firstStore := local.NewImageStore(firstRootDir, false, false, log, metrics, nil, nil)

secondStore := local.NewImageStore(secondRootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
secondStore := local.NewImageStore(secondRootDir, false, false, log, metrics, nil, nil)

thirdStore := local.NewImageStore(thirdRootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
thirdStore := local.NewImageStore(thirdRootDir, false, false, log, metrics, nil, nil)

storeController := storage.StoreController{}

@@ -190,8 +186,7 @@ func TestTrivyLibraryErrors(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)

// Create ImageStore
store := local.NewImageStore(rootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)

storeController := storage.StoreController{}
storeController.DefaultStore = store
@@ -408,8 +403,7 @@ func TestImageScannable(t *testing.T) {
// Continue with initializing the objects the scanner depends on
metrics := monitoring.NewMetricsServer(false, log)

store := local.NewImageStore(rootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)

storeController := storage.StoreController{}
storeController.DefaultStore = store
@@ -475,8 +469,7 @@ func TestDefaultTrivyDBUrl(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)

// Create ImageStore
store := local.NewImageStore(rootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)

storeController := storage.StoreController{}
storeController.DefaultStore = store
@@ -186,7 +186,7 @@ func TestVulnerableLayer(t *testing.T) {
tempDir := t.TempDir()

log := log.NewLogger("debug", "")
imageStore := local.NewImageStore(tempDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(tempDir, false, false,
log, monitoring.NewMetricsServer(false, log), nil, nil)

storeController := storage.StoreController{
@@ -40,7 +40,6 @@ import (
"zotregistry.io/zot/pkg/log"
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/local"
storageTypes "zotregistry.io/zot/pkg/storage/types"
. "zotregistry.io/zot/pkg/test"
@@ -1162,7 +1161,7 @@ func TestExpandedRepoInfo(t *testing.T) {

ctlr := api.NewController(conf)

imageStore := local.NewImageStore(tempDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(tempDir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)

storeController := storage.StoreController{
@@ -1290,8 +1289,7 @@ func TestExpandedRepoInfo(t *testing.T) {

log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
testStorage := local.NewImageStore(rootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
testStorage := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)

resp, err := resty.R().Get(baseURL + "/v2/")
So(resp, ShouldNotBeNil)
@@ -1636,7 +1634,7 @@ func TestExpandedRepoInfo(t *testing.T) {
conf.Extensions.Search.CVE = nil
ctlr := api.NewController(conf)

imageStore := local.NewImageStore(conf.Storage.RootDirectory, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(conf.Storage.RootDirectory, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)

storeController := storage.StoreController{
@@ -5387,8 +5385,7 @@ func TestMetaDBWhenDeletingImages(t *testing.T) {
// get signatur digest
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storage := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
storage := local.NewImageStore(dir, false, false, log, metrics, nil, nil)

indexBlob, err := storage.GetIndexContent(repo)
So(err, ShouldBeNil)
@@ -5464,8 +5461,7 @@ func TestMetaDBWhenDeletingImages(t *testing.T) {
// get signatur digest
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storage := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
storage := local.NewImageStore(dir, false, false, log, metrics, nil, nil)

indexBlob, err := storage.GetIndexContent(repo)
So(err, ShouldBeNil)
@@ -5679,7 +5675,7 @@ func TestMetaDBWhenDeletingImages(t *testing.T) {
}

ctlr.MetaDB = mocks.MetaDBMock{
DeleteRepoTagFn: func(repo, tag string) error { return ErrTestError },
RemoveRepoReferenceFn: func(repo, reference string, manifestDigest godigest.Digest) error { return ErrTestError },
}

resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" +
@@ -544,7 +544,7 @@ func TestChangingRepoState(t *testing.T) {
}

// ------ Create the test repos
defaultStore := local.NewImageStore(conf.Storage.RootDirectory, false, false, 0, 0, false, false,
defaultStore := local.NewImageStore(conf.Storage.RootDirectory, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)

err = WriteImageToFileSystem(img, accesibleRepo, "tag", storage.StoreController{
@@ -24,7 +24,6 @@ import (
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/storage"
storageCommon "zotregistry.io/zot/pkg/storage/common"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/local"
storageTypes "zotregistry.io/zot/pkg/storage/types"
)
@@ -281,9 +280,7 @@ func getImageStoreFromImageReference(imageReference types.ImageReference, repo,

metrics := monitoring.NewMetricsServer(false, log.Logger{})

tempImageStore := local.NewImageStore(tempRootDir, false, false,
storageConstants.DefaultGCDelay, storageConstants.DefaultUntaggedImgeRetentionDelay,
false, false, log.Logger{}, metrics, nil, nil)
tempImageStore := local.NewImageStore(tempRootDir, false, false, log.Logger{}, metrics, nil, nil)

return tempImageStore
}
@@ -69,9 +69,7 @@ func TestInjectSyncUtils(t *testing.T) {

log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imageStore := local.NewImageStore(t.TempDir(), false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil,
)
imageStore := local.NewImageStore(t.TempDir(), false, false, log, metrics, nil, nil)
injected = inject.InjectFailure(0)

ols := NewOciLayoutStorage(storage.StoreController{DefaultStore: imageStore})
@@ -183,8 +181,7 @@ func TestLocalRegistry(t *testing.T) {
UseRelPaths: true,
}, log)

syncImgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
syncImgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
repoName := "repo"

registry := NewLocalRegistry(storage.StoreController{DefaultStore: syncImgStore}, nil, log)
@@ -301,8 +298,7 @@ func TestLocalRegistry(t *testing.T) {
MandatoryAnnotations: []string{"annot1"},
}, log)

syncImgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, linter, cacheDriver)
syncImgStore := local.NewImageStore(dir, true, true, log, metrics, linter, cacheDriver)
repoName := "repo"

registry := NewLocalRegistry(storage.StoreController{DefaultStore: syncImgStore}, nil, log)
@@ -404,6 +404,73 @@ func (bdw BoltDB) GetReferrersInfo(repo string, referredDigest godigest.Digest,
return referrersInfoResult, err
}

/*
RemoveRepoReference removes the tag from RepoMetadata if the reference is a tag,

it also removes its corresponding digest from Statistics, Signatures and Referrers if there are no tags
pointing to it.
If the reference is a digest then it will remove the digest from Statistics, Signatures and Referrers only
if there are no tags pointing to the digest, otherwise it's noop.
*/
func (bdw *BoltDB) RemoveRepoReference(repo, reference string, manifestDigest godigest.Digest) error {
err := bdw.DB.Update(func(tx *bbolt.Tx) error {
buck := tx.Bucket([]byte(RepoMetadataBucket))

repoMetaBlob := buck.Get([]byte(repo))

repoMeta := mTypes.RepoMetadata{
Name: repo,
Tags: map[string]mTypes.Descriptor{},
Statistics: map[string]mTypes.DescriptorStatistics{},
Signatures: map[string]mTypes.ManifestSignatures{},
Referrers: map[string][]mTypes.ReferrerInfo{},
}

// object not found
if len(repoMetaBlob) > 0 {
err := json.Unmarshal(repoMetaBlob, &repoMeta)
if err != nil {
return err
}
}

if !common.ReferenceIsDigest(reference) {
delete(repoMeta.Tags, reference)
} else {
// remove all tags pointing to this digest
for tag, desc := range repoMeta.Tags {
if desc.Digest == reference {
delete(repoMeta.Tags, tag)
}
}
}

/* try to find at least one tag pointing to manifestDigest
if not found then we can also remove everything related to this digest */
var foundTag bool
for _, desc := range repoMeta.Tags {
if desc.Digest == manifestDigest.String() {
foundTag = true
}
}

if !foundTag {
delete(repoMeta.Statistics, manifestDigest.String())
delete(repoMeta.Signatures, manifestDigest.String())
delete(repoMeta.Referrers, manifestDigest.String())
}

repoMetaBlob, err := json.Marshal(repoMeta)
if err != nil {
return err
}

return buck.Put([]byte(repo), repoMetaBlob)
})

return err
}

func (bdw *BoltDB) SetRepoReference(repo string, reference string, manifestDigest godigest.Digest,
mediaType string,
) error {
@@ -447,6 +447,79 @@ func (dwr DynamoDB) GetReferrersInfo(repo string, referredDigest godigest.Digest
return filteredResults, nil
}

/*
RemoveRepoReference removes the tag from RepoMetadata if the reference is a tag,

it also removes its corresponding digest from Statistics, Signatures and Referrers if there are no tags
pointing to it.
If the reference is a digest then it will remove the digest from Statistics, Signatures and Referrers only
if there are no tags pointing to the digest, otherwise it's noop.
*/
func (dwr *DynamoDB) RemoveRepoReference(repo, reference string, manifestDigest godigest.Digest,
) error {
resp, err := dwr.Client.GetItem(context.TODO(), &dynamodb.GetItemInput{
TableName: aws.String(dwr.RepoMetaTablename),
Key: map[string]types.AttributeValue{
"RepoName": &types.AttributeValueMemberS{Value: repo},
},
})
if err != nil {
return err
}

repoMeta := mTypes.RepoMetadata{
Name: repo,
Tags: map[string]mTypes.Descriptor{},
Statistics: map[string]mTypes.DescriptorStatistics{},
Signatures: map[string]mTypes.ManifestSignatures{},
Referrers: map[string][]mTypes.ReferrerInfo{},
}

if resp.Item != nil {
err := attributevalue.Unmarshal(resp.Item["RepoMetadata"], &repoMeta)
if err != nil {
return err
}
}

if !common.ReferenceIsDigest(reference) {
delete(repoMeta.Tags, reference)
} else {
// find all tags pointing to this digest
tags := []string{}
for tag, desc := range repoMeta.Tags {
if desc.Digest == reference {
tags = append(tags, tag)
}
}

// remove all tags
for _, tag := range tags {
delete(repoMeta.Tags, tag)
}
}

/* try to find at least one tag pointing to manifestDigest
if not found then we can also remove everything related to this digest */
var foundTag bool

for _, desc := range repoMeta.Tags {
if desc.Digest == manifestDigest.String() {
foundTag = true
}
}

if !foundTag {
delete(repoMeta.Statistics, manifestDigest.String())
delete(repoMeta.Signatures, manifestDigest.String())
delete(repoMeta.Referrers, manifestDigest.String())
}

err = dwr.SetRepoMeta(repo, repoMeta)

return err
}

func (dwr *DynamoDB) SetRepoReference(repo string, reference string, manifestDigest godigest.Digest,
mediaType string,
) error {
@@ -109,7 +109,7 @@ func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest,
manageRepoMetaSuccessfully = false
}
} else {
err = metaDB.DeleteRepoTag(repo, reference)
err = metaDB.RemoveRepoReference(repo, reference, digest)
if err != nil {
log.Info().Msg("metadb: restoring image store")
@@ -4,7 +4,6 @@ import (
"encoding/json"
"errors"
"testing"
"time"

notreg "github.com/notaryproject/notation-go/registry"
godigest "github.com/opencontainers/go-digest"
@@ -32,9 +31,7 @@ func TestOnUpdateManifest(t *testing.T) {
storeController := storage.StoreController{}
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storeController.DefaultStore = local.NewImageStore(rootDir, true, true, 1*time.Second,
1*time.Second, true, true, log, metrics, nil, nil,
)
storeController.DefaultStore = local.NewImageStore(rootDir, true, true, log, metrics, nil, nil)

params := boltdb.DBParameters{
RootDir: rootDir,
@@ -73,9 +70,7 @@ func TestOnUpdateManifest(t *testing.T) {
storeController := storage.StoreController{}
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storeController.DefaultStore = local.NewImageStore(rootDir, true, true, 1*time.Second,
1*time.Second, true, true, log, metrics, nil, nil,
)
storeController.DefaultStore = local.NewImageStore(rootDir, true, true, log, metrics, nil, nil)

metaDB := mocks.MetaDBMock{
SetManifestDataFn: func(manifestDigest godigest.Digest, mm mTypes.ManifestData) error {
@@ -641,7 +641,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
})
})

Convey("Test DeleteRepoTag", func() {
Convey("Test RemoveRepoReference and DeleteRepoTag", func() {
var (
repo = "repo1"
tag1 = "0.0.1"
@@ -656,6 +656,62 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
err = metaDB.SetRepoReference(repo, tag2, manifestDigest2, ispec.MediaTypeImageManifest)
So(err, ShouldBeNil)

Convey("Delete reference from repo", func() {
_, err := metaDB.GetRepoMeta(repo)
So(err, ShouldBeNil)

err = metaDB.RemoveRepoReference(repo, tag1, manifestDigest1)
So(err, ShouldBeNil)

repoMeta, err := metaDB.GetRepoMeta(repo)
So(err, ShouldBeNil)

_, ok := repoMeta.Tags[tag1]
So(ok, ShouldBeFalse)
So(repoMeta.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String())
})

Convey("Delete a reference from repo", func() {
_, err := metaDB.GetRepoMeta(repo)
So(err, ShouldBeNil)

// shouldn't do anything because there is tag1 pointing to it
err = metaDB.RemoveRepoReference(repo, manifestDigest1.String(), manifestDigest1)
So(err, ShouldBeNil)

repoMeta, err := metaDB.GetRepoMeta(repo)
So(err, ShouldBeNil)

_, ok := repoMeta.Tags[tag1]
So(ok, ShouldBeFalse)
So(repoMeta.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String())
})

Convey("Delete inexistent reference from repo", func() {
inexistentDigest := godigest.FromBytes([]byte("inexistent"))
err := metaDB.RemoveRepoReference(repo, inexistentDigest.String(), inexistentDigest)
So(err, ShouldBeNil)

repoMeta, err := metaDB.GetRepoMeta(repo)
So(err, ShouldBeNil)

So(repoMeta.Tags[tag1].Digest, ShouldResemble, manifestDigest1.String())
So(repoMeta.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String())
})

Convey("Delete reference from inexistent repo", func() {
inexistentDigest := godigest.FromBytes([]byte("inexistent"))

err := metaDB.RemoveRepoReference("InexistentRepo", inexistentDigest.String(), inexistentDigest)
So(err, ShouldBeNil)

repoMeta, err := metaDB.GetRepoMeta(repo)
So(err, ShouldBeNil)

So(repoMeta.Tags[tag1].Digest, ShouldResemble, manifestDigest1.String())
So(repoMeta.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String())
})

Convey("Delete from repo a tag", func() {
_, err := metaDB.GetRepoMeta(repo)
So(err, ShouldBeNil)
@@ -682,7 +738,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
So(repoMeta.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String())
})

Convey("Delete from inexistent repo", func() {
Convey("Delete tag from inexistent repo", func() {
err := metaDB.DeleteRepoTag("InexistentRepo", "InexistentTag")
So(err, ShouldBeNil)
@@ -400,7 +400,7 @@ func TestParseStorageDynamoWrapper(t *testing.T) {

func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB) {
Convey("Test with simple case", func() {
imageStore := local.NewImageStore(rootDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(rootDir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)

storeController := storage.StoreController{DefaultStore: imageStore}
@@ -486,7 +486,7 @@ func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB) {
})

Convey("Accept orphan signatures", func() {
imageStore := local.NewImageStore(rootDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(rootDir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)

storeController := storage.StoreController{DefaultStore: imageStore}
@@ -543,7 +543,7 @@ func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB) {
})

Convey("Check statistics after load", func() {
imageStore := local.NewImageStore(rootDir, false, false, 0, 0, false, false,
imageStore := local.NewImageStore(rootDir, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)

storeController := storage.StoreController{DefaultStore: imageStore}
@@ -45,6 +45,16 @@ type MetaDB interface { //nolint:interfacebloat
// SetRepoReference sets the reference of a manifest in the tag list of a repo
SetRepoReference(repo string, reference string, manifestDigest godigest.Digest, mediaType string) error

/*
RemoveRepoReference removes the tag from RepoMetadata if the reference is a tag,

it also removes its corresponding digest from Statistics, Signatures and Referrers if there are no tags
pointing to it.
If the reference is a digest then it will remove the digest from Statistics, Signatures and Referrers only
if there are no tags pointing to the digest, otherwise it's noop
*/
RemoveRepoReference(repo, reference string, manifestDigest godigest.Digest) error

// DeleteRepoTag delets the tag from the tag list of a repo
DeleteRepoTag(repo string, tag string) error
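The RemoveRepoReference contract added to the MetaDB interface above distinguishes tag references from digest references. A hedged usage sketch of both cases against any MetaDB implementation (the function, repo name, and tag below are illustrative, not part of the commit):

import (
	godigest "github.com/opencontainers/go-digest"

	mTypes "zotregistry.io/zot/pkg/meta/types"
)

// removeBothWays exercises the two behaviours described in the comment above.
func removeBothWays(metaDB mTypes.MetaDB, repo, tag string, digest godigest.Digest) error {
	// Tag reference: the tag entry is always dropped; the digest's
	// Statistics/Signatures/Referrers survive while another tag points at it.
	if err := metaDB.RemoveRepoReference(repo, tag, digest); err != nil {
		return err
	}

	// Digest reference: once no tag points at the digest anymore, its
	// Statistics, Signatures and Referrers entries are dropped as well;
	// otherwise the call is a no-op.
	return metaDB.RemoveRepoReference(repo, digest.String(), digest)
}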
@@ -6,10 +6,8 @@ import (
"encoding/json"
"errors"
"fmt"
"math/rand"
"path"
"strings"
"time"

"github.com/docker/distribution/registry/storage/driver"
godigest "github.com/opencontainers/go-digest"
@@ -501,6 +499,30 @@ func isBlobReferencedInImageManifest(imgStore storageTypes.ImageStore, repo stri
return false, nil
}

func isBlobReferencedInORASManifest(imgStore storageTypes.ImageStore, repo string,
bdigest, mdigest godigest.Digest, log zlog.Logger,
) (bool, error) {
if bdigest == mdigest {
return true, nil
}

manifestContent, err := GetOrasManifestByDigest(imgStore, repo, mdigest, log)
if err != nil {
log.Error().Err(err).Str("repo", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")

return false, err
}

for _, blob := range manifestContent.Blobs {
if bdigest == blob.Digest {
return true, nil
}
}

return false, nil
}

func IsBlobReferencedInImageIndex(imgStore storageTypes.ImageStore, repo string,
digest godigest.Digest, index ispec.Index, log zlog.Logger,
) (bool, error) {
@@ -520,6 +542,8 @@ func IsBlobReferencedInImageIndex(imgStore storageTypes.ImageStore, repo string,
found, _ = IsBlobReferencedInImageIndex(imgStore, repo, digest, indexImage, log)
case ispec.MediaTypeImageManifest:
found, _ = isBlobReferencedInImageManifest(imgStore, repo, digest, desc.Digest, log)
case oras.MediaTypeArtifactManifest:
found, _ = isBlobReferencedInORASManifest(imgStore, repo, digest, desc.Digest, log)
default:
log.Warn().Str("mediatype", desc.MediaType).Msg("unknown media-type")
// should return true for digests found in index.json even if we don't know it's mediatype
@@ -552,132 +576,6 @@ func IsBlobReferenced(imgStore storageTypes.ImageStore, repo string,
return IsBlobReferencedInImageIndex(imgStore, repo, digest, index, log)
}

/* Garbage Collection */

func AddImageManifestBlobsToReferences(imgStore storageTypes.ImageStore,
repo string, mdigest godigest.Digest, refBlobs map[string]bool, log zlog.Logger,
) error {
manifestContent, err := GetImageManifest(imgStore, repo, mdigest, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")

return err
}

refBlobs[mdigest.String()] = true
refBlobs[manifestContent.Config.Digest.String()] = true

// if there is a Subject, it may not exist yet and that is ok
if manifestContent.Subject != nil {
refBlobs[manifestContent.Subject.Digest.String()] = true
}

for _, layer := range manifestContent.Layers {
refBlobs[layer.Digest.String()] = true
}

return nil
}

func AddORASImageManifestBlobsToReferences(imgStore storageTypes.ImageStore,
repo string, mdigest godigest.Digest, refBlobs map[string]bool, log zlog.Logger,
) error {
manifestContent, err := GetOrasManifestByDigest(imgStore, repo, mdigest, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")

return err
}

refBlobs[mdigest.String()] = true

// if there is a Subject, it may not exist yet and that is ok
if manifestContent.Subject != nil {
refBlobs[manifestContent.Subject.Digest.String()] = true
}

for _, blob := range manifestContent.Blobs {
refBlobs[blob.Digest.String()] = true
}

return nil
}

func AddImageIndexBlobsToReferences(imgStore storageTypes.ImageStore,
repo string, mdigest godigest.Digest, refBlobs map[string]bool, log zlog.Logger,
) error {
index, err := GetImageIndex(imgStore, repo, mdigest, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")

return err
}

refBlobs[mdigest.String()] = true

// if there is a Subject, it may not exist yet and that is ok
if index.Subject != nil {
refBlobs[index.Subject.Digest.String()] = true
}

for _, manifest := range index.Manifests {
refBlobs[manifest.Digest.String()] = true
}

return nil
}

func AddIndexBlobToReferences(imgStore storageTypes.ImageStore,
repo string, index ispec.Index, refBlobs map[string]bool, log zlog.Logger,
) error {
for _, desc := range index.Manifests {
switch desc.MediaType {
case ispec.MediaTypeImageIndex:
if err := AddImageIndexBlobsToReferences(imgStore, repo, desc.Digest, refBlobs, log); err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("failed to read blobs in multiarch(index) image")

return err
}
case ispec.MediaTypeImageManifest:
if err := AddImageManifestBlobsToReferences(imgStore, repo, desc.Digest, refBlobs, log); err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("failed to read blobs in image manifest")

return err
}
case oras.MediaTypeArtifactManifest:
if err := AddORASImageManifestBlobsToReferences(imgStore, repo, desc.Digest, refBlobs, log); err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("failed to read blobs in image manifest")

return err
}
}
}

return nil
}

func AddRepoBlobsToReferences(imgStore storageTypes.ImageStore,
repo string, refBlobs map[string]bool, log zlog.Logger,
) error {
dir := path.Join(imgStore.RootDir(), repo)
if !imgStore.DirExists(dir) {
return zerr.ErrRepoNotFound
}

index, err := GetIndex(imgStore, repo, log)
if err != nil {
return err
}

return AddIndexBlobToReferences(imgStore, repo, index, refBlobs, log)
}

func ApplyLinter(imgStore storageTypes.ImageStore, linter Lint, repo string, descriptor ispec.Descriptor,
) (bool, error) {
pass := true
@@ -1042,77 +940,3 @@ func (dt *dedupeTask) DoWork(ctx context.Context) error {

return err
}

/*
GCTaskGenerator takes all repositories found in the storage.imagestore

and it will execute garbage collection for each repository by creating a task
for each repository and pushing it to the task scheduler.
*/
type GCTaskGenerator struct {
ImgStore storageTypes.ImageStore
lastRepo string
nextRun time.Time
done bool
rand *rand.Rand
}

func (gen *GCTaskGenerator) getRandomDelay() int {
maxDelay := 30

return gen.rand.Intn(maxDelay)
}

func (gen *GCTaskGenerator) Next() (scheduler.Task, error) {
if gen.lastRepo == "" && gen.nextRun.IsZero() {
gen.rand = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) //nolint: gosec
}

delay := gen.getRandomDelay()

gen.nextRun = time.Now().Add(time.Duration(delay) * time.Second)

repo, err := gen.ImgStore.GetNextRepository(gen.lastRepo)
if err != nil {
return nil, err
}

if repo == "" {
gen.done = true

return nil, nil
}

gen.lastRepo = repo

return NewGCTask(gen.ImgStore, repo), nil
}

func (gen *GCTaskGenerator) IsDone() bool {
return gen.done
}

func (gen *GCTaskGenerator) IsReady() bool {
return time.Now().After(gen.nextRun)
}

func (gen *GCTaskGenerator) Reset() {
gen.lastRepo = ""
gen.done = false
gen.nextRun = time.Time{}
}

type gcTask struct {
imgStore storageTypes.ImageStore
repo string
}

func NewGCTask(imgStore storageTypes.ImageStore, repo string,
) *gcTask {
return &gcTask{imgStore, repo}
}

func (gct *gcTask) DoWork(ctx context.Context) error {
// run task
return gct.imgStore.RunGCRepo(gct.repo)
}
@@ -5,7 +5,6 @@ import (
"encoding/json"
"errors"
"os"
"path"
"testing"

godigest "github.com/opencontainers/go-digest"
@@ -20,7 +19,6 @@ import (
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
common "zotregistry.io/zot/pkg/storage/common"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/test"
. "zotregistry.io/zot/pkg/test/image-utils"
@@ -38,8 +36,7 @@ func TestValidateManifest(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)

content := []byte("this is a blob")
digest := godigest.FromBytes(content)
@@ -157,8 +154,7 @@ func TestGetReferrersErrors(t *testing.T) {
UseRelPaths: true,
}, log)

imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, true, log, metrics, nil, cacheDriver)
imgStore := local.NewImageStore(dir, false, true, log, metrics, nil, cacheDriver)

artifactType := "application/vnd.example.icecream.v1"
validDigest := godigest.FromBytes([]byte("blob"))
@@ -433,193 +429,3 @@ func TestIsSignature(t *testing.T) {
So(isSingature, ShouldBeFalse)
})
}

func TestGarbageCollectManifestErrors(t *testing.T) {
Convey("Make imagestore and upload manifest", t, func(c C) {
dir := t.TempDir()

repoName := "test"

log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)

Convey("trigger repo not found in GetReferencedBlobs()", func() {
err := common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldNotBeNil)
})

content := []byte("this is a blob")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)

_, blen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(content), digest)
So(err, ShouldBeNil)
So(blen, ShouldEqual, len(content))

cblob, cdigest := test.GetRandomImageConfig()
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest)
So(err, ShouldBeNil)
So(clen, ShouldEqual, len(cblob))

manifest := ispec.Manifest{
Config: ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: cdigest,
Size: int64(len(cblob)),
},
Layers: []ispec.Descriptor{
{
MediaType: ispec.MediaTypeImageLayer,
Digest: digest,
Size: int64(len(content)),
},
},
}

manifest.SchemaVersion = 2

body, err := json.Marshal(manifest)
So(err, ShouldBeNil)

manifestDigest := godigest.FromBytes(body)

_, _, err = imgStore.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, body)
So(err, ShouldBeNil)

Convey("trigger GetIndex error in GetReferencedBlobs", func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName), 0o000)
So(err, ShouldBeNil)

defer func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName), 0o755)
So(err, ShouldBeNil)
}()

err = common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldNotBeNil)
})

Convey("trigger GetImageManifest error in GetReferencedBlobsInImageManifest", func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", manifestDigest.Encoded()), 0o000)
So(err, ShouldBeNil)

defer func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", manifestDigest.Encoded()), 0o755)
So(err, ShouldBeNil)
}()

err = common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldNotBeNil)
})
})
}

func TestGarbageCollectIndexErrors(t *testing.T) {
Convey("Make imagestore and upload manifest", t, func(c C) {
dir := t.TempDir()

repoName := "test"

log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)

content := []byte("this is a blob")
bdgst := godigest.FromBytes(content)
So(bdgst, ShouldNotBeNil)

_, bsize, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(content), bdgst)
So(err, ShouldBeNil)
So(bsize, ShouldEqual, len(content))

var index ispec.Index
index.SchemaVersion = 2
index.MediaType = ispec.MediaTypeImageIndex

var digest godigest.Digest
for i := 0; i < 4; i++ {
// upload image config blob
upload, err := imgStore.NewBlobUpload(repoName)
So(err, ShouldBeNil)
So(upload, ShouldNotBeEmpty)

cblob, cdigest := test.GetRandomImageConfig()
buf := bytes.NewBuffer(cblob)
buflen := buf.Len()
blob, err := imgStore.PutBlobChunkStreamed(repoName, upload, buf)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

err = imgStore.FinishBlobUpload(repoName, upload, buf, cdigest)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

// create a manifest
manifest := ispec.Manifest{
Config: ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: cdigest,
Size: int64(len(cblob)),
},
Layers: []ispec.Descriptor{
{
MediaType: ispec.MediaTypeImageLayer,
Digest: bdgst,
Size: bsize,
},
},
}
manifest.SchemaVersion = 2
content, err = json.Marshal(manifest)
So(err, ShouldBeNil)
digest = godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
_, _, err = imgStore.PutImageManifest(repoName, digest.String(), ispec.MediaTypeImageManifest, content)
So(err, ShouldBeNil)

index.Manifests = append(index.Manifests, ispec.Descriptor{
Digest: digest,
MediaType: ispec.MediaTypeImageManifest,
Size: int64(len(content)),
})
}

// upload index image
indexContent, err := json.Marshal(index)
So(err, ShouldBeNil)
indexDigest := godigest.FromBytes(indexContent)
So(indexDigest, ShouldNotBeNil)

_, _, err = imgStore.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageIndex, indexContent)
So(err, ShouldBeNil)

err = common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldBeNil)

Convey("trigger GetImageIndex error in GetReferencedBlobsInImageIndex", func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", indexDigest.Encoded()), 0o000)
So(err, ShouldBeNil)

defer func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", indexDigest.Encoded()), 0o755)
So(err, ShouldBeNil)
}()

err = common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldNotBeNil)
})
})
}
708
pkg/storage/gc/gc.go
Normal file
@ -0,0 +1,708 @
|
||||
package gc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
oras "github.com/oras-project/artifacts-spec/specs-go/v1"
|
||||
|
||||
zerr "zotregistry.io/zot/errors"
|
||||
zcommon "zotregistry.io/zot/pkg/common"
|
||||
zlog "zotregistry.io/zot/pkg/log"
|
||||
mTypes "zotregistry.io/zot/pkg/meta/types"
|
||||
"zotregistry.io/zot/pkg/scheduler"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
common "zotregistry.io/zot/pkg/storage/common"
|
||||
"zotregistry.io/zot/pkg/storage/types"
|
||||
)
|
||||
|
||||
const (
|
||||
cosignSignatureTagSuffix = "sig"
|
||||
SBOMTagSuffix = "sbom"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
// will garbage collect referrers with missing subject older than Delay
|
||||
Referrers bool
|
||||
// will garbage collect blobs older than Delay
|
||||
Delay time.Duration
|
||||
// will garbage collect untagged manifests older than RetentionDelay
|
||||
RetentionDelay time.Duration
|
||||
}
|
||||
|
||||
type GarbageCollect struct {
|
||||
imgStore types.ImageStore
|
||||
opts Options
|
||||
metaDB mTypes.MetaDB
|
||||
log zlog.Logger
|
||||
}
|
||||
|
||||
func NewGarbageCollect(imgStore types.ImageStore, metaDB mTypes.MetaDB, opts Options, log zlog.Logger,
|
||||
) GarbageCollect {
|
||||
return GarbageCollect{
|
||||
imgStore: imgStore,
|
||||
metaDB: metaDB,
|
||||
opts: opts,
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
CleanImageStorePeriodically runs a periodic garbage collect on the ImageStore provided to the constructor,
|
||||
given an interval and a Scheduler.
|
||||
*/
|
||||
func (gc GarbageCollect) CleanImageStorePeriodically(interval time.Duration, sch *scheduler.Scheduler) {
|
||||
generator := &GCTaskGenerator{
|
||||
imgStore: gc.imgStore,
|
||||
gc: gc,
|
||||
}
|
||||
|
||||
sch.SubmitGenerator(generator, interval, scheduler.MediumPriority)
|
||||
}
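
// For orientation, a minimal caller-side wiring sketch of the new gc package, assuming an image
// store, MetaDB, task scheduler and logger are already constructed elsewhere; the helper name
// startGC, the interval, and the delay values are illustrative assumptions, while the imports it
// relies on (types, mTypes, scheduler, zlog, gc, time) are the ones already used in this commit.
func startGC(imgStore types.ImageStore, metaDB mTypes.MetaDB,
	taskScheduler *scheduler.Scheduler, log zlog.Logger,
) {
	collector := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
		Referrers:      true,           // also gc referrers whose subject is missing
		Delay:          time.Hour,      // blobs younger than this are never collected
		RetentionDelay: 24 * time.Hour, // untagged manifests younger than this are kept
	}, log)

	// one gc task per repository is generated and submitted on every interval
	collector.CleanImageStorePeriodically(2*time.Hour, taskScheduler)
}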
|
||||
|
||||
/*
CleanRepo executes a garbage collection of any blob found in storage which is not referenced
by any manifest in the repo's index.json.
It also garbage collects referrers with a missing subject if the Referrers option is enabled,
as well as untagged manifests.
*/
|
||||
func (gc GarbageCollect) CleanRepo(repo string) error {
|
||||
gc.log.Info().Msg(fmt.Sprintf("executing GC of orphaned blobs for %s", path.Join(gc.imgStore.RootDir(), repo)))
|
||||
|
||||
if err := gc.cleanRepo(repo); err != nil {
|
||||
errMessage := fmt.Sprintf("error while running GC for %s", path.Join(gc.imgStore.RootDir(), repo))
|
||||
gc.log.Error().Err(err).Msg(errMessage)
|
||||
gc.log.Info().Msg(fmt.Sprintf("GC unsuccessfully completed for %s", path.Join(gc.imgStore.RootDir(), repo)))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
gc.log.Info().Msg(fmt.Sprintf("GC successfully completed for %s", path.Join(gc.imgStore.RootDir(), repo)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gc GarbageCollect) cleanRepo(repo string) error {
|
||||
var lockLatency time.Time
|
||||
|
||||
dir := path.Join(gc.imgStore.RootDir(), repo)
|
||||
if !gc.imgStore.DirExists(dir) {
|
||||
return zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
gc.imgStore.Lock(&lockLatency)
|
||||
defer gc.imgStore.Unlock(&lockLatency)
|
||||
|
||||
/* this index (which represents the index.json of this repo) is the root point from which we
|
||||
search for dangling manifests/blobs
|
||||
so this index is passed by reference in all functions that modifies it
|
||||
|
||||
Instead of removing manifests one by one with storage APIs we just remove manifests descriptors
|
||||
from index.Manifests[] list and update repo's index.json afterwards.
|
||||
|
||||
After updating repo's index.json we clean all unreferenced blobs (manifests included).
|
||||
*/
|
||||
index, err := common.GetIndex(gc.imgStore, repo, gc.log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// gc referrer manifests with a missing subject, as well as untagged manifests
|
||||
if err := gc.cleanManifests(repo, &index); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// update the repo's index.json in storage
|
||||
if err := gc.imgStore.PutIndexContent(repo, index); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// gc unreferenced blobs
|
||||
if err := gc.cleanBlobs(repo, index, gc.opts.Delay, gc.log); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gc GarbageCollect) cleanManifests(repo string, index *ispec.Index) error {
|
||||
var err error
|
||||
|
||||
/* gc all manifests that have a missing subject; stop when neither referrer nor untagged
gc happened in a full loop over index.json. */
|
||||
var stop bool
|
||||
for !stop {
|
||||
var gcedReferrer bool
|
||||
|
||||
if gc.opts.Referrers {
|
||||
gc.log.Debug().Str("repository", repo).Msg("gc: manifests with missing referrers")
|
||||
|
||||
gcedReferrer, err = gc.cleanIndexReferrers(repo, index, *index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
referenced := make(map[godigest.Digest]bool, 0)
|
||||
|
||||
/* gather all manifests referenced in multiarch images or referenced by other manifests,
so that we can skip them in cleanUntaggedManifests */
|
||||
if err := gc.identifyManifestsReferencedInIndex(*index, repo, referenced); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// apply image retention policy
|
||||
gcedManifest, err := gc.cleanUntaggedManifests(repo, index, referenced)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/* if we gced any manifest then loop again and gc manifests with
|
||||
a subject pointing to the last ones which were gced. */
|
||||
stop = !gcedReferrer && !gcedManifest
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
cleanIndexReferrers will gc all referrers with a missing subject, recursively.

rootIndex is the repo's index.json; it needs to be passed down to cleanReferrer()
because rootIndex is the place we look for referrers.
*/
|
||||
func (gc GarbageCollect) cleanIndexReferrers(repo string, rootIndex *ispec.Index, index ispec.Index,
|
||||
) (bool, error) {
|
||||
var count int
|
||||
|
||||
var err error
|
||||
|
||||
for _, desc := range index.Manifests {
|
||||
switch desc.MediaType {
|
||||
case ispec.MediaTypeImageIndex:
|
||||
indexImage, err := common.GetImageIndex(gc.imgStore, repo, desc.Digest, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read multiarch(index) image")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
gced, err := gc.cleanReferrer(repo, rootIndex, desc, indexImage.Subject, indexImage.ArtifactType)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
/* if we gc index then no need to continue searching for referrers inside it.
|
||||
they will be gced when the next garbage collect is executed (if they are older than retentionDelay),
because manifests that are part of indexes will still be referenced in index.json */
|
||||
if gced {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
gced, err = gc.cleanIndexReferrers(repo, rootIndex, indexImage)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if gced {
|
||||
count++
|
||||
}
|
||||
case ispec.MediaTypeImageManifest, oras.MediaTypeArtifactManifest:
|
||||
image, err := common.GetImageManifest(gc.imgStore, repo, desc.Digest, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("repo", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
artifactType := zcommon.GetManifestArtifactType(image)
|
||||
|
||||
gced, err := gc.cleanReferrer(repo, rootIndex, desc, image.Subject, artifactType)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if gced {
|
||||
count++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return count > 0, err
|
||||
}
|
||||
|
||||
func (gc GarbageCollect) cleanReferrer(repo string, index *ispec.Index, manifestDesc ispec.Descriptor,
|
||||
subject *ispec.Descriptor, artifactType string,
|
||||
) (bool, error) {
|
||||
var gced bool
|
||||
|
||||
var err error
|
||||
|
||||
if subject != nil {
|
||||
// try to find subject in index.json
|
||||
referenced := isManifestReferencedInIndex(index, subject.Digest)
|
||||
|
||||
var signatureType string
|
||||
// check if it's a notation signature
|
||||
if artifactType == zcommon.ArtifactTypeNotation {
|
||||
signatureType = storage.NotationType
|
||||
}
|
||||
|
||||
if !referenced {
|
||||
gced, err = gc.gcManifest(repo, index, manifestDesc, signatureType, subject.Digest)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cosign
|
||||
tag, ok := manifestDesc.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
if strings.HasPrefix(tag, "sha256-") && (strings.HasSuffix(tag, cosignSignatureTagSuffix) ||
|
||||
strings.HasSuffix(tag, SBOMTagSuffix)) {
|
||||
subjectDigest := getSubjectFromCosignTag(tag)
|
||||
referenced := isManifestReferencedInIndex(index, subjectDigest)
|
||||
|
||||
if !referenced {
|
||||
gced, err = gc.gcManifest(repo, index, manifestDesc, storage.CosignType, subjectDigest)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return gced, nil
|
||||
}
|
||||
|
||||
// gcManifest removes a manifest entry from an index and syncs metaDB accordingly if the blob is older than gc.Delay.
|
||||
func (gc GarbageCollect) gcManifest(repo string, index *ispec.Index, desc ispec.Descriptor,
|
||||
signatureType string, subjectDigest godigest.Digest,
|
||||
) (bool, error) {
|
||||
var gced bool
|
||||
|
||||
canGC, err := isBlobOlderThan(gc.imgStore, repo, desc.Digest, gc.opts.Delay, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
|
||||
Str("delay", gc.opts.Delay.String()).Msg("gc: failed to check if blob is older than delay")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
if canGC {
|
||||
if gced, err = gc.removeManifest(repo, index, desc, signatureType, subjectDigest); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return gced, nil
|
||||
}
|
||||
|
||||
// removeManifest removes a manifest entry from an index and syncs metaDB accordingly.
|
||||
func (gc GarbageCollect) removeManifest(repo string, index *ispec.Index,
|
||||
desc ispec.Descriptor, signatureType string, subjectDigest godigest.Digest,
|
||||
) (bool, error) {
|
||||
gc.log.Debug().Str("repository", repo).Str("digest", desc.Digest.String()).Msg("gc: removing manifest")
|
||||
|
||||
// remove from index
|
||||
_, err := common.RemoveManifestDescByReference(index, desc.Digest.String(), true)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrManifestConflict) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
// sync metaDB
|
||||
if gc.metaDB != nil {
|
||||
if signatureType != "" {
|
||||
err = gc.metaDB.DeleteSignature(repo, subjectDigest, mTypes.SignatureMetadata{
|
||||
SignatureDigest: desc.Digest.String(),
|
||||
SignatureType: signatureType,
|
||||
})
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Msg("gc,metadb: unable to remove signature in metaDB")
|
||||
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
err := gc.metaDB.RemoveRepoReference(repo, desc.Digest.String(), desc.Digest)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Msg("gc, metadb: unable to remove repo reference in metaDB")
|
||||
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (gc GarbageCollect) cleanUntaggedManifests(repo string, index *ispec.Index,
|
||||
referenced map[godigest.Digest]bool,
|
||||
) (bool, error) {
|
||||
var gced bool
|
||||
|
||||
var err error
|
||||
|
||||
gc.log.Debug().Str("repository", repo).Msg("gc: manifests without tags")
|
||||
|
||||
// manifests that are part of image indexes or are referrers were gathered in the referenced map; skip them
|
||||
for _, desc := range index.Manifests {
|
||||
// skip manifests referenced in image indexes
|
||||
if _, referenced := referenced[desc.Digest]; referenced {
|
||||
continue
|
||||
}
|
||||
|
||||
// remove untagged images
|
||||
if desc.MediaType == ispec.MediaTypeImageManifest || desc.MediaType == ispec.MediaTypeImageIndex {
|
||||
_, ok := desc.Annotations[ispec.AnnotationRefName]
|
||||
if !ok {
|
||||
gced, err = gc.gcManifest(repo, index, desc, "", "")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return gced, nil
|
||||
}
|
||||
|
||||
// identifyManifestsReferencedInIndex marks in the referenced map both manifests referenced by image indexes and manifests that are referrers.
|
||||
func (gc GarbageCollect) identifyManifestsReferencedInIndex(index ispec.Index, repo string,
|
||||
referenced map[godigest.Digest]bool,
|
||||
) error {
|
||||
for _, desc := range index.Manifests {
|
||||
switch desc.MediaType {
|
||||
case ispec.MediaTypeImageIndex:
|
||||
indexImage, err := common.GetImageIndex(gc.imgStore, repo, desc.Digest, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read multiarch(index) image")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if indexImage.Subject != nil {
|
||||
referenced[desc.Digest] = true
|
||||
}
|
||||
|
||||
for _, indexDesc := range indexImage.Manifests {
|
||||
referenced[indexDesc.Digest] = true
|
||||
}
|
||||
|
||||
if err := gc.identifyManifestsReferencedInIndex(indexImage, repo, referenced); err != nil {
|
||||
return err
|
||||
}
|
||||
case ispec.MediaTypeImageManifest, oras.MediaTypeArtifactManifest:
|
||||
image, err := common.GetImageManifest(gc.imgStore, repo, desc.Digest, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("repo", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if image.Subject != nil {
|
||||
referenced[desc.Digest] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanBlobs garbage collects all blobs which are not referenced by any manifest found in the repo's index.json.
|
||||
func (gc GarbageCollect) cleanBlobs(repo string, index ispec.Index,
|
||||
delay time.Duration, log zlog.Logger,
|
||||
) error {
|
||||
gc.log.Debug().Str("repository", repo).Msg("gc: blobs")
|
||||
|
||||
refBlobs := map[string]bool{}
|
||||
|
||||
err := gc.addIndexBlobsToReferences(repo, index, refBlobs)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("repository", repo).Msg("gc: unable to get referenced blobs in repo")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
allBlobs, err := gc.imgStore.GetAllBlobs(repo)
|
||||
if err != nil {
|
||||
// /blobs/sha256/ may be empty in the case of s3, no need to return err, we want to skip
|
||||
if errors.As(err, &driver.PathNotFoundError{}) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Error().Err(err).Str("repository", repo).Msg("gc: unable to get all blobs")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
gcBlobs := make([]godigest.Digest, 0)
|
||||
|
||||
for _, blob := range allBlobs {
|
||||
digest := godigest.NewDigestFromEncoded(godigest.SHA256, blob)
|
||||
if err = digest.Validate(); err != nil {
|
||||
log.Error().Err(err).Str("repository", repo).Str("digest", blob).Msg("gc: unable to parse digest")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, ok := refBlobs[digest.String()]; !ok {
|
||||
canGC, err := isBlobOlderThan(gc.imgStore, repo, digest, delay, log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("repository", repo).Str("digest", blob).Msg("gc: unable to determine GC delay")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if canGC {
|
||||
gcBlobs = append(gcBlobs, digest)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we removed all blobs from repo
|
||||
removeRepo := len(gcBlobs) > 0 && len(gcBlobs) == len(allBlobs)
|
||||
|
||||
reaped, err := gc.imgStore.CleanupRepo(repo, gcBlobs, removeRepo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info().Str("repository", repo).Int("count", reaped).Msg("gc: garbage collected blobs")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addIndexBlobsToReferences adds the blobs referenced by the manifests listed in index.json to the refBlobs map.
|
||||
func (gc GarbageCollect) addIndexBlobsToReferences(repo string, index ispec.Index, refBlobs map[string]bool,
|
||||
) error {
|
||||
for _, desc := range index.Manifests {
|
||||
switch desc.MediaType {
|
||||
case ispec.MediaTypeImageIndex:
|
||||
if err := gc.addImageIndexBlobsToReferences(repo, desc.Digest, refBlobs); err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read blobs in multiarch(index) image")
|
||||
|
||||
return err
|
||||
}
|
||||
case ispec.MediaTypeImageManifest:
|
||||
if err := gc.addImageManifestBlobsToReferences(repo, desc.Digest, refBlobs); err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read blobs in image manifest")
|
||||
|
||||
return err
|
||||
}
|
||||
case oras.MediaTypeArtifactManifest:
|
||||
if err := gc.addORASImageManifestBlobsToReferences(repo, desc.Digest, refBlobs); err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read blobs in ORAS image manifest")
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gc GarbageCollect) addImageIndexBlobsToReferences(repo string, mdigest godigest.Digest, refBlobs map[string]bool,
|
||||
) error {
|
||||
index, err := common.GetImageIndex(gc.imgStore, repo, mdigest, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
refBlobs[mdigest.String()] = true
|
||||
|
||||
// if there is a Subject, it may not exist yet and that is ok
|
||||
if index.Subject != nil {
|
||||
refBlobs[index.Subject.Digest.String()] = true
|
||||
}
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
refBlobs[manifest.Digest.String()] = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gc GarbageCollect) addImageManifestBlobsToReferences(repo string, mdigest godigest.Digest,
|
||||
refBlobs map[string]bool,
|
||||
) error {
|
||||
manifestContent, err := common.GetImageManifest(gc.imgStore, repo, mdigest, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
refBlobs[mdigest.String()] = true
|
||||
refBlobs[manifestContent.Config.Digest.String()] = true
|
||||
|
||||
// if there is a Subject, it may not exist yet and that is ok
|
||||
if manifestContent.Subject != nil {
|
||||
refBlobs[manifestContent.Subject.Digest.String()] = true
|
||||
}
|
||||
|
||||
for _, layer := range manifestContent.Layers {
|
||||
refBlobs[layer.Digest.String()] = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gc GarbageCollect) addORASImageManifestBlobsToReferences(repo string, mdigest godigest.Digest,
|
||||
refBlobs map[string]bool,
|
||||
) error {
|
||||
manifestContent, err := common.GetOrasManifestByDigest(gc.imgStore, repo, mdigest, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
refBlobs[mdigest.String()] = true
|
||||
|
||||
// if there is a Subject, it may not exist yet and that is ok
|
||||
if manifestContent.Subject != nil {
|
||||
refBlobs[manifestContent.Subject.Digest.String()] = true
|
||||
}
|
||||
|
||||
for _, blob := range manifestContent.Blobs {
|
||||
refBlobs[blob.Digest.String()] = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isManifestReferencedInIndex(index *ispec.Index, digest godigest.Digest) bool {
|
||||
for _, manifest := range index.Manifests {
|
||||
if manifest.Digest == digest {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func isBlobOlderThan(imgStore types.ImageStore, repo string,
|
||||
digest godigest.Digest, delay time.Duration, log zlog.Logger,
|
||||
) (bool, error) {
|
||||
_, _, modtime, err := imgStore.StatBlob(repo, digest)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).
|
||||
Msg("gc: failed to stat blob")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
if modtime.Add(delay).After(time.Now()) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func getSubjectFromCosignTag(tag string) godigest.Digest {
|
||||
alg := strings.Split(tag, "-")[0]
|
||||
encoded := strings.Split(strings.Split(tag, "-")[1], ".sig")[0]
|
||||
|
||||
return godigest.NewDigestFromEncoded(godigest.Algorithm(alg), encoded)
|
||||
}
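
// exampleCosignSubject is a hypothetical, package-internal sketch showing how the helper above
// resolves a cosign signature tag back to its subject; the digest value is fabricated purely for
// illustration and is not part of this commit.
func exampleCosignSubject() godigest.Digest {
	// a cosign signature tag has the form "<alg>-<encoded subject digest>.sig"
	tag := "sha256-1b2c3d4e5f60718293a4b5c6d7e8f9001122334455667788990011223344aabb.sig"

	// returns "sha256:1b2c3d4e5f60718293a4b5c6d7e8f9001122334455667788990011223344aabb"
	return getSubjectFromCosignTag(tag)
}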
|
||||
|
||||
/*
|
||||
GCTaskGenerator iterates over all repositories found in the image store
and executes garbage collection for each of them by creating a task
per repository and pushing it to the task scheduler.
|
||||
*/
|
||||
type GCTaskGenerator struct {
|
||||
imgStore types.ImageStore
|
||||
gc GarbageCollect
|
||||
lastRepo string
|
||||
nextRun time.Time
|
||||
done bool
|
||||
rand *rand.Rand
|
||||
}
|
||||
|
||||
func (gen *GCTaskGenerator) getRandomDelay() int {
|
||||
maxDelay := 30
|
||||
|
||||
return gen.rand.Intn(maxDelay)
|
||||
}
|
||||
|
||||
func (gen *GCTaskGenerator) Next() (scheduler.Task, error) {
|
||||
if gen.lastRepo == "" && gen.nextRun.IsZero() {
|
||||
gen.rand = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) //nolint: gosec
|
||||
}
|
||||
|
||||
delay := gen.getRandomDelay()
|
||||
|
||||
gen.nextRun = time.Now().Add(time.Duration(delay) * time.Second)
|
||||
|
||||
repo, err := gen.imgStore.GetNextRepository(gen.lastRepo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if repo == "" {
|
||||
gen.done = true
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
gen.lastRepo = repo
|
||||
|
||||
return NewGCTask(gen.imgStore, gen.gc, repo), nil
|
||||
}
|
||||
|
||||
func (gen *GCTaskGenerator) IsDone() bool {
|
||||
return gen.done
|
||||
}
|
||||
|
||||
func (gen *GCTaskGenerator) IsReady() bool {
|
||||
return time.Now().After(gen.nextRun)
|
||||
}
|
||||
|
||||
func (gen *GCTaskGenerator) Reset() {
|
||||
gen.lastRepo = ""
|
||||
gen.done = false
|
||||
gen.nextRun = time.Time{}
|
||||
}
|
||||
|
||||
type gcTask struct {
|
||||
imgStore types.ImageStore
|
||||
gc GarbageCollect
|
||||
repo string
|
||||
}
|
||||
|
||||
func NewGCTask(imgStore types.ImageStore, gc GarbageCollect, repo string,
|
||||
) *gcTask {
|
||||
return &gcTask{imgStore, gc, repo}
|
||||
}
|
||||
|
||||
func (gct *gcTask) DoWork(ctx context.Context) error {
|
||||
// run task
|
||||
return gct.gc.CleanRepo(gct.repo)
|
||||
}
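
// driveGenerator is a simplified, hypothetical driver loop illustrating the generator/task
// contract above; it is not zot's actual scheduler, which also handles priorities, contexts and
// concurrency. It only uses methods defined in this file (IsDone, IsReady, Next, DoWork).
func driveGenerator(ctx context.Context, gen *GCTaskGenerator) error {
	for !gen.IsDone() {
		if !gen.IsReady() {
			time.Sleep(time.Second) // wait for the random per-task delay to elapse

			continue
		}

		task, err := gen.Next()
		if err != nil {
			return err
		}

		if task == nil {
			// no more repositories to gc in this round
			break
		}

		if err := task.DoWork(ctx); err != nil {
			return err
		}
	}

	return nil
}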
|
567
pkg/storage/gc/gc_internal_test.go
Normal file
@ -0,0 +1,567 @
|
||||
package gc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
|
||||
zcommon "zotregistry.io/zot/pkg/common"
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
"zotregistry.io/zot/pkg/log"
|
||||
"zotregistry.io/zot/pkg/meta/types"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/storage/cache"
|
||||
common "zotregistry.io/zot/pkg/storage/common"
|
||||
storageConstants "zotregistry.io/zot/pkg/storage/constants"
|
||||
"zotregistry.io/zot/pkg/storage/local"
|
||||
"zotregistry.io/zot/pkg/test"
|
||||
"zotregistry.io/zot/pkg/test/mocks"
|
||||
)
|
||||
|
||||
var (
|
||||
errGC = errors.New("gc error")
|
||||
repoName = "test" //nolint: gochecknoglobals
|
||||
)
|
||||
|
||||
func TestGarbageCollectManifestErrors(t *testing.T) {
|
||||
Convey("Make imagestore and upload manifest", t, func(c C) {
|
||||
dir := t.TempDir()
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
|
||||
RootDir: dir,
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
Convey("trigger repo not found in addImageIndexBlobsToReferences()", func() {
|
||||
err := gc.addIndexBlobsToReferences(repoName, ispec.Index{
|
||||
Manifests: []ispec.Descriptor{
|
||||
{
|
||||
Digest: godigest.FromString("miss"),
|
||||
MediaType: ispec.MediaTypeImageIndex,
|
||||
},
|
||||
},
|
||||
}, map[string]bool{})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("trigger repo not found in addImageManifestBlobsToReferences()", func() {
|
||||
err := gc.addIndexBlobsToReferences(repoName, ispec.Index{
|
||||
Manifests: []ispec.Descriptor{
|
||||
{
|
||||
Digest: godigest.FromString("miss"),
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
},
|
||||
},
|
||||
}, map[string]bool{})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("trigger repo not found in addORASImageManifestBlobsToReferences()", func() {
|
||||
err := gc.addIndexBlobsToReferences(repoName, ispec.Index{
|
||||
Manifests: []ispec.Descriptor{
|
||||
{
|
||||
Digest: godigest.FromString("miss"),
|
||||
MediaType: artifactspec.MediaTypeArtifactManifest,
|
||||
},
|
||||
},
|
||||
}, map[string]bool{})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
content := []byte("this is a blob")
|
||||
digest := godigest.FromBytes(content)
|
||||
So(digest, ShouldNotBeNil)
|
||||
|
||||
_, blen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(content), digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blen, ShouldEqual, len(content))
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
|
||||
manifest := ispec.Manifest{
|
||||
Config: ispec.Descriptor{
|
||||
MediaType: ispec.MediaTypeImageConfig,
|
||||
Digest: cdigest,
|
||||
Size: int64(len(cblob)),
|
||||
},
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: ispec.MediaTypeImageLayer,
|
||||
Digest: digest,
|
||||
Size: int64(len(content)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
manifest.SchemaVersion = 2
|
||||
|
||||
body, err := json.Marshal(manifest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestDigest := godigest.FromBytes(body)
|
||||
|
||||
_, _, err = imgStore.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, body)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("trigger GetIndex error in GetReferencedBlobs", func() {
|
||||
index, err := common.GetIndex(imgStore, repoName, log)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = os.Chmod(path.Join(imgStore.RootDir(), repoName), 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
defer func() {
|
||||
err := os.Chmod(path.Join(imgStore.RootDir(), repoName), 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
}()
|
||||
|
||||
err = gc.addIndexBlobsToReferences(repoName, index, map[string]bool{})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("trigger GetImageManifest error in AddIndexBlobsToReferences", func() {
|
||||
index, err := common.GetIndex(imgStore, repoName, log)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", manifestDigest.Encoded()), 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
defer func() {
|
||||
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", manifestDigest.Encoded()), 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
}()
|
||||
|
||||
err = gc.addIndexBlobsToReferences(repoName, index, map[string]bool{})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestGarbageCollectIndexErrors(t *testing.T) {
|
||||
Convey("Make imagestore and upload manifest", t, func(c C) {
|
||||
dir := t.TempDir()
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
|
||||
RootDir: dir,
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
content := []byte("this is a blob")
|
||||
bdgst := godigest.FromBytes(content)
|
||||
So(bdgst, ShouldNotBeNil)
|
||||
|
||||
_, bsize, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(content), bdgst)
|
||||
So(err, ShouldBeNil)
|
||||
So(bsize, ShouldEqual, len(content))
|
||||
|
||||
var index ispec.Index
|
||||
index.SchemaVersion = 2
|
||||
index.MediaType = ispec.MediaTypeImageIndex
|
||||
|
||||
var digest godigest.Digest
|
||||
for i := 0; i < 4; i++ {
|
||||
// upload image config blob
|
||||
upload, err := imgStore.NewBlobUpload(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
So(upload, ShouldNotBeEmpty)
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
buf := bytes.NewBuffer(cblob)
|
||||
buflen := buf.Len()
|
||||
blob, err := imgStore.PutBlobChunkStreamed(repoName, upload, buf)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
// create a manifest
|
||||
manifest := ispec.Manifest{
|
||||
Config: ispec.Descriptor{
|
||||
MediaType: ispec.MediaTypeImageConfig,
|
||||
Digest: cdigest,
|
||||
Size: int64(len(cblob)),
|
||||
},
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: ispec.MediaTypeImageLayer,
|
||||
Digest: bdgst,
|
||||
Size: bsize,
|
||||
},
|
||||
},
|
||||
}
|
||||
manifest.SchemaVersion = 2
|
||||
content, err = json.Marshal(manifest)
|
||||
So(err, ShouldBeNil)
|
||||
digest = godigest.FromBytes(content)
|
||||
So(digest, ShouldNotBeNil)
|
||||
_, _, err = imgStore.PutImageManifest(repoName, digest.String(), ispec.MediaTypeImageManifest, content)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
index.Manifests = append(index.Manifests, ispec.Descriptor{
|
||||
Digest: digest,
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
Size: int64(len(content)),
|
||||
})
|
||||
}
|
||||
|
||||
// upload index image
|
||||
indexContent, err := json.Marshal(index)
|
||||
So(err, ShouldBeNil)
|
||||
indexDigest := godigest.FromBytes(indexContent)
|
||||
So(indexDigest, ShouldNotBeNil)
|
||||
|
||||
_, _, err = imgStore.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageIndex, indexContent)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
index, err = common.GetIndex(imgStore, repoName, log)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = gc.addIndexBlobsToReferences(repoName, index, map[string]bool{})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("trigger GetImageIndex error in GetReferencedBlobsInImageIndex", func() {
|
||||
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", indexDigest.Encoded()), 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
defer func() {
|
||||
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", indexDigest.Encoded()), 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
}()
|
||||
|
||||
err = gc.addIndexBlobsToReferences(repoName, index, map[string]bool{})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestGarbageCollectWithMockedImageStore(t *testing.T) {
|
||||
Convey("Cover gc error paths", t, func(c C) {
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
|
||||
Convey("Error on PutIndexContent in gc.cleanRepo()", func() {
|
||||
returnedIndexJSON := ispec.Index{}
|
||||
|
||||
returnedIndexJSONBuf, err := json.Marshal(returnedIndexJSON)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
imgStore := mocks.MockedImageStore{
|
||||
PutIndexContentFn: func(repo string, index ispec.Index) error {
|
||||
return errGC
|
||||
},
|
||||
GetIndexContentFn: func(repo string) ([]byte, error) {
|
||||
return returnedIndexJSONBuf, nil
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
err = gc.cleanRepo(repoName)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Error on gc.cleanBlobs() in gc.cleanRepo()", func() {
|
||||
returnedIndexJSON := ispec.Index{}
|
||||
|
||||
returnedIndexJSONBuf, err := json.Marshal(returnedIndexJSON)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
imgStore := mocks.MockedImageStore{
|
||||
PutIndexContentFn: func(repo string, index ispec.Index) error {
|
||||
return nil
|
||||
},
|
||||
GetIndexContentFn: func(repo string) ([]byte, error) {
|
||||
return returnedIndexJSONBuf, nil
|
||||
},
|
||||
GetAllBlobsFn: func(repo string) ([]string, error) {
|
||||
return []string{}, errGC
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
err = gc.cleanRepo(repoName)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("False on imgStore.DirExists() in gc.cleanRepo()", func() {
|
||||
imgStore := mocks.MockedImageStore{
|
||||
DirExistsFn: func(d string) bool {
|
||||
return false
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
err := gc.cleanRepo(repoName)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Error on gc.identifyManifestsReferencedInIndex in gc.cleanManifests() with multiarch image", func() {
|
||||
indexImageDigest := godigest.FromBytes([]byte("digest"))
|
||||
|
||||
returnedIndexImage := ispec.Index{
|
||||
Subject: &ispec.DescriptorEmptyJSON,
|
||||
Manifests: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: ispec.MediaTypeImageIndex,
|
||||
Digest: godigest.FromBytes([]byte("digest2")),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
returnedIndexImageBuf, err := json.Marshal(returnedIndexImage)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
imgStore := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
if digest == indexImageDigest {
|
||||
return returnedIndexImageBuf, nil
|
||||
} else {
|
||||
return nil, errGC
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: false,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
err = gc.cleanManifests(repoName, &ispec.Index{
|
||||
Manifests: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: ispec.MediaTypeImageIndex,
|
||||
Digest: indexImageDigest,
|
||||
},
|
||||
},
|
||||
})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Error on gc.identifyManifestsReferencedInIndex in gc.cleanManifests() with image", func() {
|
||||
imgStore := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
return nil, errGC
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: false,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
err := gc.cleanManifests(repoName, &ispec.Index{
|
||||
Manifests: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
Digest: godigest.FromBytes([]byte("digest")),
|
||||
},
|
||||
},
|
||||
})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Error on gc.gcManifest() in gc.cleanManifests() with image", func() {
|
||||
returnedImage := ispec.Manifest{
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
}
|
||||
|
||||
returnedImageBuf, err := json.Marshal(returnedImage)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
imgStore := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
return returnedImageBuf, nil
|
||||
},
|
||||
}
|
||||
|
||||
metaDB := mocks.MetaDBMock{
|
||||
RemoveRepoReferenceFn: func(repo, reference string, manifestDigest godigest.Digest) error {
|
||||
return errGC
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, metaDB, Options{
|
||||
Referrers: false,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
err = gc.cleanManifests(repoName, &ispec.Index{
|
||||
Manifests: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
Digest: godigest.FromBytes([]byte("digest")),
|
||||
},
|
||||
},
|
||||
})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
Convey("Error on gc.gcManifest() in gc.cleanManifests() with signature", func() {
|
||||
returnedImage := ispec.Manifest{
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
ArtifactType: zcommon.NotationSignature,
|
||||
}
|
||||
|
||||
returnedImageBuf, err := json.Marshal(returnedImage)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
imgStore := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
return returnedImageBuf, nil
|
||||
},
|
||||
}
|
||||
|
||||
metaDB := mocks.MetaDBMock{
|
||||
DeleteSignatureFn: func(repo string, signedManifestDigest godigest.Digest, sm types.SignatureMetadata) error {
|
||||
return errGC
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, metaDB, Options{
|
||||
Referrers: false,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
desc := ispec.Descriptor{
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
Digest: godigest.FromBytes([]byte("digest")),
|
||||
}
|
||||
|
||||
index := &ispec.Index{
|
||||
Manifests: []ispec.Descriptor{desc},
|
||||
}
|
||||
_, err = gc.removeManifest(repoName, index, desc, storage.NotationType,
|
||||
godigest.FromBytes([]byte("digest2")))
|
||||
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Error on gc.gcReferrer() in gc.cleanManifests() with image index", func() {
|
||||
manifestDesc := ispec.Descriptor{
|
||||
MediaType: ispec.MediaTypeImageIndex,
|
||||
Digest: godigest.FromBytes([]byte("digest")),
|
||||
}
|
||||
|
||||
returnedIndexImage := ispec.Index{
|
||||
MediaType: ispec.MediaTypeImageIndex,
|
||||
Subject: &ispec.Descriptor{
|
||||
Digest: godigest.FromBytes([]byte("digest2")),
|
||||
},
|
||||
Manifests: []ispec.Descriptor{
|
||||
manifestDesc,
|
||||
},
|
||||
}
|
||||
|
||||
returnedIndexImageBuf, err := json.Marshal(returnedIndexImage)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
imgStore := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
return returnedIndexImageBuf, nil
|
||||
},
|
||||
StatBlobFn: func(repo string, digest godigest.Digest) (bool, int64, time.Time, error) {
|
||||
return false, -1, time.Time{}, errGC
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
err = gc.cleanManifests(repoName, &returnedIndexImage)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Error on gc.gcReferrer() in gc.cleanManifests() with image", func() {
|
||||
manifestDesc := ispec.Descriptor{
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
Digest: godigest.FromBytes([]byte("digest")),
|
||||
}
|
||||
|
||||
returnedImage := ispec.Manifest{
|
||||
Subject: &ispec.Descriptor{
|
||||
Digest: godigest.FromBytes([]byte("digest2")),
|
||||
},
|
||||
MediaType: ispec.MediaTypeImageManifest,
|
||||
}
|
||||
|
||||
returnedImageBuf, err := json.Marshal(returnedImage)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
imgStore := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
return returnedImageBuf, nil
|
||||
},
|
||||
StatBlobFn: func(repo string, digest godigest.Digest) (bool, int64, time.Time, error) {
|
||||
return false, -1, time.Time{}, errGC
|
||||
},
|
||||
}
|
||||
|
||||
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
err = gc.cleanManifests(repoName, &ispec.Index{
|
||||
Manifests: []ispec.Descriptor{
|
||||
manifestDesc,
|
||||
},
|
||||
})
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
}
|
@ -41,19 +41,15 @@ const (
|
||||
|
||||
// ImageStore provides the image storage operations.
|
||||
type ImageStore struct {
|
||||
rootDir string
|
||||
storeDriver storageTypes.Driver
|
||||
lock *sync.RWMutex
|
||||
log zlog.Logger
|
||||
metrics monitoring.MetricServer
|
||||
cache cache.Cache
|
||||
dedupe bool
|
||||
linter common.Lint
|
||||
commit bool
|
||||
gc bool
|
||||
gcReferrers bool
|
||||
gcDelay time.Duration
|
||||
retentionDelay time.Duration
|
||||
rootDir string
|
||||
storeDriver storageTypes.Driver
|
||||
lock *sync.RWMutex
|
||||
log zlog.Logger
|
||||
metrics monitoring.MetricServer
|
||||
cache cache.Cache
|
||||
dedupe bool
|
||||
linter common.Lint
|
||||
commit bool
|
||||
}
|
||||
|
||||
func (is *ImageStore) RootDir() string {
|
||||
@ -67,9 +63,8 @@ func (is *ImageStore) DirExists(d string) bool {
|
||||
// NewImageStore returns a new image store backed by cloud storages.
|
||||
// see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
|
||||
// Use the last argument to properly set a cache database, or it will default to boltDB local storage.
|
||||
func NewImageStore(rootDir string, cacheDir string, gc bool, gcReferrers bool, gcDelay time.Duration,
|
||||
untaggedImageRetentionDelay time.Duration, dedupe, commit bool, log zlog.Logger, metrics monitoring.MetricServer,
|
||||
linter common.Lint, storeDriver storageTypes.Driver, cacheDriver cache.Cache,
|
||||
func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlog.Logger,
|
||||
metrics monitoring.MetricServer, linter common.Lint, storeDriver storageTypes.Driver, cacheDriver cache.Cache,
|
||||
) storageTypes.ImageStore {
|
||||
if err := storeDriver.EnsureDir(rootDir); err != nil {
|
||||
log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir")
|
||||
@ -78,19 +73,15 @@ func NewImageStore(rootDir string, cacheDir string, gc bool, gcReferrers bool, g
|
||||
}
|
||||
|
||||
imgStore := &ImageStore{
|
||||
rootDir: rootDir,
|
||||
storeDriver: storeDriver,
|
||||
lock: &sync.RWMutex{},
|
||||
log: log,
|
||||
metrics: metrics,
|
||||
dedupe: dedupe,
|
||||
linter: linter,
|
||||
commit: commit,
|
||||
gc: gc,
|
||||
gcReferrers: gcReferrers,
|
||||
gcDelay: gcDelay,
|
||||
retentionDelay: untaggedImageRetentionDelay,
|
||||
cache: cacheDriver,
|
||||
rootDir: rootDir,
|
||||
storeDriver: storeDriver,
|
||||
lock: &sync.RWMutex{},
|
||||
log: log,
|
||||
metrics: metrics,
|
||||
dedupe: dedupe,
|
||||
linter: linter,
|
||||
commit: commit,
|
||||
cache: cacheDriver,
|
||||
}
|
||||
|
||||
return imgStore
|
||||
@ -342,6 +333,12 @@ func (is *ImageStore) GetNextRepository(repo string) (string, error) {
|
||||
|
||||
_, err := is.storeDriver.List(dir)
|
||||
if err != nil {
|
||||
if errors.As(err, &driver.PathNotFoundError{}) {
|
||||
is.log.Debug().Msg("empty rootDir")
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
is.log.Error().Err(err).Msg("failure walking storage root-dir")
|
||||
|
||||
return "", err
|
||||
@ -574,15 +571,6 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
|
||||
|
||||
// now update "index.json"
|
||||
index.Manifests = append(index.Manifests, desc)
|
||||
dir = path.Join(is.rootDir, repo)
|
||||
indexPath := path.Join(dir, "index.json")
|
||||
|
||||
buf, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON")
|
||||
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// update the descriptors artifact type in order to check for signatures when applying the linter
|
||||
desc.ArtifactType = artifactType
|
||||
@ -595,9 +583,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
if _, err = is.storeDriver.WriteFile(indexPath, buf); err != nil {
|
||||
is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write")
|
||||
|
||||
if err := is.PutIndexContent(repo, index); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
@ -613,18 +599,10 @@ func (is *ImageStore) DeleteImageManifest(repo, reference string, detectCollisio
|
||||
|
||||
var lockLatency time.Time
|
||||
|
||||
var err error
|
||||
|
||||
is.Lock(&lockLatency)
|
||||
defer func() {
|
||||
is.Unlock(&lockLatency)
|
||||
defer is.Unlock(&lockLatency)
|
||||
|
||||
if err == nil {
|
||||
monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
|
||||
}
|
||||
}()
|
||||
|
||||
err = is.deleteImageManifest(repo, reference, detectCollisions)
|
||||
err := is.deleteImageManifest(repo, reference, detectCollisions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -633,6 +611,8 @@ func (is *ImageStore) DeleteImageManifest(repo, reference string, detectCollisio
|
||||
}
|
||||
|
||||
func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisions bool) error {
|
||||
defer monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
|
||||
|
||||
index, err := common.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1173,7 +1153,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6
|
||||
return true, blobSize, nil
|
||||
}
|
||||
|
||||
// StatBlob verifies if a blob is present inside a repository. The caller function SHOULD lock from outside.
|
||||
// StatBlob verifies if a blob is present inside a repository. The caller function MUST lock from outside.
|
||||
func (is *ImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64, time.Time, error) {
|
||||
if err := digest.Validate(); err != nil {
|
||||
return false, -1, time.Time{}, err
|
||||
@ -1398,7 +1378,7 @@ func (is *ImageStore) GetBlob(repo string, digest godigest.Digest, mediaType str
|
||||
return blobReadCloser, binfo.Size(), nil
|
||||
}
|
||||
|
||||
// GetBlobContent returns blob contents, the caller function SHOULD lock from outside.
|
||||
// GetBlobContent returns blob contents, the caller function MUST lock from outside.
|
||||
func (is *ImageStore) GetBlobContent(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
if err := digest.Validate(); err != nil {
|
||||
return []byte{}, err
|
||||
@ -1463,7 +1443,7 @@ func (is *ImageStore) GetOrasReferrers(repo string, gdigest godigest.Digest, art
|
||||
return common.GetOrasReferrers(is, repo, gdigest, artifactType, is.log)
|
||||
}
|
||||
|
||||
// GetIndexContent returns index.json contents, the caller function SHOULD lock from outside.
|
||||
// GetIndexContent returns index.json contents, the caller function MUST lock from outside.
|
||||
func (is *ImageStore) GetIndexContent(repo string) ([]byte, error) {
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
|
||||
@ -1483,6 +1463,27 @@ func (is *ImageStore) GetIndexContent(repo string) ([]byte, error) {
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func (is *ImageStore) PutIndexContent(repo string, index ispec.Index) error {
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
|
||||
indexPath := path.Join(dir, "index.json")
|
||||
|
||||
buf, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = is.storeDriver.WriteFile(indexPath, buf); err != nil {
|
||||
is.log.Error().Err(err).Str("file", indexPath).Msg("unable to write")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteBlob removes the blob from the repository.
|
||||
func (is *ImageStore) DeleteBlob(repo string, digest godigest.Digest) error {
|
||||
var lockLatency time.Time
|
||||
@ -1497,6 +1498,56 @@ func (is *ImageStore) DeleteBlob(repo string, digest godigest.Digest) error {
|
||||
return is.deleteBlob(repo, digest)
|
||||
}
|
||||
|
||||
/*
|
||||
CleanupRepo removes the given blobs from the repository, and removes the repo itself if removeRepo is true and all blobs were removed.
The caller function MUST lock from outside.
|
||||
*/
|
||||
func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRepo bool) (int, error) {
|
||||
count := 0
|
||||
|
||||
for _, digest := range blobs {
|
||||
is.log.Debug().Str("repository", repo).Str("digest", digest.String()).Msg("perform GC on blob")
|
||||
|
||||
if err := is.deleteBlob(repo, digest); err != nil {
|
||||
if errors.Is(err, zerr.ErrBlobReferenced) {
|
||||
if err := is.deleteImageManifest(repo, digest.String(), true); err != nil {
|
||||
if errors.Is(err, zerr.ErrManifestConflict) || errors.Is(err, zerr.ErrManifestReferenced) {
|
||||
continue
|
||||
}
|
||||
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete manifest")
|
||||
|
||||
return count, err
|
||||
}
|
||||
|
||||
count++
|
||||
} else {
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete blob")
|
||||
|
||||
return count, err
|
||||
}
|
||||
} else {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
blobUploads, err := is.storeDriver.List(path.Join(is.RootDir(), repo, storageConstants.BlobUploadDir))
|
||||
if err != nil {
|
||||
is.log.Debug().Str("repository", repo).Msg("unable to list .uploads/ dir")
|
||||
}
|
||||
|
||||
// if the removeRepo flag is true, all blobs were cleaned up, and there are no blobs currently being uploaded.
|
||||
if removeRepo && count == len(blobs) && count > 0 && len(blobUploads) == 0 {
|
||||
if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil {
|
||||
is.log.Error().Err(err).Str("repository", repo).Msg("unable to remove repo")
|
||||
|
||||
return count, err
|
||||
}
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
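
// cleanupExample is a hypothetical, package-internal sketch of a CleanupRepo caller honoring the
// locking contract documented above; the repo name and helper are illustrative and not part of
// this commit.
func cleanupExample(is *ImageStore, blobs []godigest.Digest) (int, error) {
	var lockLatency time.Time

	is.Lock(&lockLatency)
	defer is.Unlock(&lockLatency)

	// remove the given blobs and, because removeRepo is true, the repo itself
	// if all of its blobs end up removed and nothing is being uploaded
	return is.CleanupRepo("myrepo", blobs, true)
}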
|
||||
|
||||
func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
|
||||
blobPath := is.BlobPath(repo, digest)
|
||||
|
||||
@ -1573,362 +1624,17 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (is *ImageStore) garbageCollect(repo string) error {
|
||||
if is.gcReferrers {
|
||||
is.log.Info().Msg("gc: manifests with missing referrers")
|
||||
|
||||
// gc all manifests that have a missing subject, stop when no gc happened in a full loop over index.json.
|
||||
stop := false
|
||||
for !stop {
|
||||
// because we gc manifests in the loop, need to get latest index.json content
|
||||
index, err := common.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gced, err := is.garbageCollectIndexReferrers(repo, index, index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/* if we delete any manifest then loop again and gc manifests with
|
||||
a subject pointing to the last ones which were gc'ed. */
|
||||
stop = !gced
|
||||
}
|
||||
}
|
||||
|
||||
index, err := common.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
is.log.Info().Msg("gc: manifests without tags")
|
||||
|
||||
// apply image retention policy
|
||||
if err := is.garbageCollectUntaggedManifests(index, repo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
is.log.Info().Msg("gc: blobs")
|
||||
|
||||
if err := is.garbageCollectBlobs(is, repo, is.gcDelay, is.log); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
garbageCollectIndexReferrers will gc all referrers with a missing subject recursively
|
||||
|
||||
rootIndex is indexJson, need to pass it down to garbageCollectReferrer()
|
||||
rootIndex is the place we look for referrers.
|
||||
*/
|
||||
func (is *ImageStore) garbageCollectIndexReferrers(repo string, rootIndex ispec.Index, index ispec.Index,
|
||||
) (bool, error) {
|
||||
var count int
|
||||
|
||||
var err error
|
||||
|
||||
for _, desc := range index.Manifests {
|
||||
switch desc.MediaType {
|
||||
case ispec.MediaTypeImageIndex:
|
||||
indexImage, err := common.GetImageIndex(is, repo, desc.Digest, is.log)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read multiarch(index) image")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
gced, err := is.garbageCollectReferrer(repo, rootIndex, desc, indexImage.Subject)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
/* if we gc index then no need to continue searching for referrers inside it.
|
||||
they will be gced when the next garbage collect is executed(if they are older than retentionDelay),
|
||||
because manifests part of indexes will still be referenced in index.json */
|
||||
if gced {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if gced, err = is.garbageCollectIndexReferrers(repo, rootIndex, indexImage); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if gced {
|
||||
count++
|
||||
}
|
||||
case ispec.MediaTypeImageManifest, artifactspec.MediaTypeArtifactManifest:
|
||||
image, err := common.GetImageManifest(is, repo, desc.Digest, is.log)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("repo", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
gced, err := is.garbageCollectReferrer(repo, rootIndex, desc, image.Subject)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if gced {
|
||||
count++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return count > 0, err
|
||||
}
|
||||
|
||||
func (is *ImageStore) garbageCollectReferrer(repo string, index ispec.Index, manifestDesc ispec.Descriptor,
|
||||
subject *ispec.Descriptor,
|
||||
) (bool, error) {
|
||||
var gced bool
|
||||
|
||||
var err error
|
||||
|
||||
if subject != nil {
|
||||
// try to find subject in index.json
|
||||
if ok := isManifestReferencedInIndex(index, subject.Digest); !ok {
|
||||
gced, err = garbageCollectManifest(is, repo, manifestDesc.Digest, is.gcDelay)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tag, ok := manifestDesc.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
if strings.HasPrefix(tag, "sha256-") && (strings.HasSuffix(tag, cosignSignatureTagSuffix) ||
|
||||
strings.HasSuffix(tag, SBOMTagSuffix)) {
|
||||
if ok := isManifestReferencedInIndex(index, getSubjectFromCosignTag(tag)); !ok {
|
||||
gced, err = garbageCollectManifest(is, repo, manifestDesc.Digest, is.gcDelay)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return gced, err
|
||||
}
|
||||
|
||||
func (is *ImageStore) garbageCollectUntaggedManifests(index ispec.Index, repo string) error {
|
||||
referencedByImageIndex := make([]string, 0)
|
||||
|
||||
if err := identifyManifestsReferencedInIndex(is, index, repo, &referencedByImageIndex); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// first gather manifests part of image indexes and referrers, we want to skip checking them
|
||||
for _, desc := range index.Manifests {
|
||||
// skip manifests referenced in image indexes
|
||||
if zcommon.Contains(referencedByImageIndex, desc.Digest.String()) {
|
||||
continue
|
||||
}
|
||||
|
||||
// remove untagged images
|
||||
if desc.MediaType == ispec.MediaTypeImageManifest || desc.MediaType == ispec.MediaTypeImageIndex {
|
||||
_, ok := desc.Annotations[ispec.AnnotationRefName]
|
||||
if !ok {
|
||||
_, err := garbageCollectManifest(is, repo, desc.Digest, is.retentionDelay)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Adds both referenced manifests and referrers from an index.
|
||||
func identifyManifestsReferencedInIndex(imgStore *ImageStore, index ispec.Index, repo string, referenced *[]string,
|
||||
) error {
|
||||
for _, desc := range index.Manifests {
|
||||
switch desc.MediaType {
|
||||
case ispec.MediaTypeImageIndex:
|
||||
indexImage, err := common.GetImageIndex(imgStore, repo, desc.Digest, imgStore.log)
|
||||
if err != nil {
|
||||
imgStore.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read multiarch(index) image")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if indexImage.Subject != nil {
|
||||
*referenced = append(*referenced, desc.Digest.String())
|
||||
}
|
||||
|
||||
for _, indexDesc := range indexImage.Manifests {
|
||||
*referenced = append(*referenced, indexDesc.Digest.String())
|
||||
}
|
||||
|
||||
if err := identifyManifestsReferencedInIndex(imgStore, indexImage, repo, referenced); err != nil {
|
||||
return err
|
||||
}
|
||||
case ispec.MediaTypeImageManifest, artifactspec.MediaTypeArtifactManifest:
|
||||
image, err := common.GetImageManifest(imgStore, repo, desc.Digest, imgStore.log)
|
||||
if err != nil {
|
||||
imgStore.log.Error().Err(err).Str("repo", repo).Str("digest", desc.Digest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if image.Subject != nil {
|
||||
*referenced = append(*referenced, desc.Digest.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func garbageCollectManifest(imgStore *ImageStore, repo string, digest godigest.Digest, delay time.Duration,
|
||||
) (bool, error) {
|
||||
canGC, err := isBlobOlderThan(imgStore, repo, digest, delay, imgStore.log)
|
||||
if err != nil {
|
||||
imgStore.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).
|
||||
Str("delay", imgStore.gcDelay.String()).Msg("gc: failed to check if blob is older than delay")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
if canGC {
|
||||
imgStore.log.Info().Str("repository", repo).Str("digest", digest.String()).
|
||||
Msg("gc: removing unreferenced manifest")
|
||||
|
||||
if err := imgStore.deleteImageManifest(repo, digest.String(), true); err != nil {
|
||||
if errors.Is(err, zerr.ErrManifestConflict) {
|
||||
imgStore.log.Info().Str("repository", repo).Str("digest", digest.String()).
|
||||
Msg("gc: skipping removing manifest due to conflict")
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (is *ImageStore) garbageCollectBlobs(imgStore *ImageStore, repo string,
|
||||
delay time.Duration, log zlog.Logger,
|
||||
) error {
|
||||
refBlobs := map[string]bool{}
|
||||
|
||||
err := common.AddRepoBlobsToReferences(imgStore, repo, refBlobs, log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("repository", repo).Msg("unable to get referenced blobs in repo")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
allBlobs, err := imgStore.GetAllBlobs(repo)
|
||||
if err != nil {
|
||||
// /blobs/sha256/ may be empty in the case of s3, no need to return err, we want to skip
|
||||
if errors.As(err, &driver.PathNotFoundError{}) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Error().Err(err).Str("repository", repo).Msg("unable to get all blobs")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
reaped := 0
|
||||
|
||||
for _, blob := range allBlobs {
|
||||
digest := godigest.NewDigestFromEncoded(godigest.SHA256, blob)
|
||||
if err = digest.Validate(); err != nil {
|
||||
log.Error().Err(err).Str("repository", repo).Str("digest", blob).Msg("unable to parse digest")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, ok := refBlobs[digest.String()]; !ok {
|
||||
ok, err := isBlobOlderThan(imgStore, repo, digest, delay, log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("repository", repo).Str("digest", blob).Msg("unable to determine GC delay")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := imgStore.deleteBlob(repo, digest); err != nil {
|
||||
if errors.Is(err, zerr.ErrBlobReferenced) {
|
||||
if err := imgStore.deleteImageManifest(repo, digest.String(), true); err != nil {
|
||||
if errors.Is(err, zerr.ErrManifestConflict) {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Error().Err(err).Str("repository", repo).Str("digest", blob).Msg("unable to delete blob")
|
||||
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Error().Err(err).Str("repository", repo).Str("digest", blob).Msg("unable to delete blob")
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Info().Str("repository", repo).Str("digest", blob).Msg("garbage collected blob")
|
||||
|
||||
reaped++
|
||||
}
|
||||
}
|
||||
|
||||
blobUploads, err := is.storeDriver.List(path.Join(is.RootDir(), repo, storageConstants.BlobUploadDir))
|
||||
if err != nil {
|
||||
is.log.Debug().Str("repository", repo).Msg("unable to list .uploads/ dir")
|
||||
}
|
||||
|
||||
// if we cleaned all blobs let's also remove the repo so that it won't be returned by catalog
|
||||
if len(allBlobs) > 0 && reaped == len(allBlobs) && len(blobUploads) == 0 {
|
||||
log.Info().Str("repository", repo).Msg("garbage collected all blobs, cleaning repo...")
|
||||
|
||||
if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil {
|
||||
log.Error().Err(err).Str("repository", repo).Msg("unable to delete repo")
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Info().Str("repository", repo).Int("count", reaped).Msg("garbage collected blobs")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (is *ImageStore) gcRepo(repo string) error {
|
||||
var lockLatency time.Time
|
||||
|
||||
is.Lock(&lockLatency)
|
||||
err := is.garbageCollect(repo)
|
||||
is.Unlock(&lockLatency)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (is *ImageStore) GetAllBlobs(repo string) ([]string, error) {
|
||||
dir := path.Join(is.rootDir, repo, "blobs", "sha256")
|
||||
|
||||
files, err := is.storeDriver.List(dir)
|
||||
if err != nil {
|
||||
if errors.As(err, &driver.PathNotFoundError{}) {
|
||||
is.log.Debug().Msg("empty rootDir")
|
||||
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
@ -1941,30 +1647,6 @@ func (is *ImageStore) GetAllBlobs(repo string) ([]string, error) {
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (is *ImageStore) RunGCRepo(repo string) error {
|
||||
is.log.Info().Msg(fmt.Sprintf("executing GC of orphaned blobs for %s", path.Join(is.RootDir(), repo)))
|
||||
|
||||
if err := is.gcRepo(repo); err != nil {
|
||||
errMessage := fmt.Sprintf("error while running GC for %s", path.Join(is.RootDir(), repo))
|
||||
is.log.Error().Err(err).Msg(errMessage)
|
||||
is.log.Info().Msg(fmt.Sprintf("GC unsuccessfully completed for %s", path.Join(is.RootDir(), repo)))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
is.log.Info().Msg(fmt.Sprintf("GC successfully completed for %s", path.Join(is.RootDir(), repo)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (is *ImageStore) RunGCPeriodically(interval time.Duration, sch *scheduler.Scheduler) {
|
||||
generator := &common.GCTaskGenerator{
|
||||
ImgStore: is,
|
||||
}
|
||||
|
||||
sch.SubmitGenerator(generator, interval, scheduler.MediumPriority)
|
||||
}
|
||||
|
||||
func (is *ImageStore) GetNextDigestWithBlobPaths(lastDigests []godigest.Digest) (godigest.Digest, []string, error) {
|
||||
var lockLatency time.Time
|
||||
|
||||
@ -2217,40 +1899,3 @@ func (bs *blobStream) Read(buf []byte) (int, error) {
|
||||
func (bs *blobStream) Close() error {
|
||||
return bs.closer.Close()
|
||||
}
|
||||
|
||||
func isBlobOlderThan(imgStore storageTypes.ImageStore, repo string,
	digest godigest.Digest, delay time.Duration, log zlog.Logger,
) (bool, error) {
	_, _, modtime, err := imgStore.StatBlob(repo, digest)
	if err != nil {
		log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).
			Msg("gc: failed to stat blob")

		return false, err
	}

	if modtime.Add(delay).After(time.Now()) {
		return false, nil
	}

	log.Info().Str("repository", repo).Str("digest", digest.String()).Msg("perform GC on blob")

	return true, nil
}
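
The modtime comparison above is the whole GC-delay mechanism: a blob only becomes eligible once its modification time plus the configured delay is in the past. A small standalone sketch of that check (the durations are illustrative):

package main

import (
	"fmt"
	"time"
)

// olderThan mirrors the modtime check used by isBlobOlderThan.
func olderThan(modtime time.Time, delay time.Duration) bool {
	return !modtime.Add(delay).After(time.Now())
}

func main() {
	modtime := time.Now().Add(-30 * time.Second) // blob written 30s ago

	fmt.Println(olderThan(modtime, time.Minute))    // false: still inside the GC delay
	fmt.Println(olderThan(modtime, 10*time.Second)) // true: eligible for collection
}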
|
||||
|
||||
func getSubjectFromCosignTag(tag string) godigest.Digest {
	alg := strings.Split(tag, "-")[0]
	encoded := strings.Split(strings.Split(tag, "-")[1], ".sig")[0]

	return godigest.NewDigestFromEncoded(godigest.Algorithm(alg), encoded)
}

func isManifestReferencedInIndex(index ispec.Index, digest godigest.Digest) bool {
	for _, manifest := range index.Manifests {
		if manifest.Digest == digest {
			return true
		}
	}

	return false
}
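
Cosign stores signatures under tags shaped like sha256-&lt;subject digest&gt;.sig (SBOMs use a similar suffix), and getSubjectFromCosignTag simply reverses that encoding. A standalone sketch of the round trip, assuming only the go-digest package:

package main

import (
	"fmt"
	"strings"

	godigest "github.com/opencontainers/go-digest"
)

// subjectFromCosignTag mirrors the parsing done by getSubjectFromCosignTag above.
func subjectFromCosignTag(tag string) godigest.Digest {
	alg := strings.Split(tag, "-")[0]
	encoded := strings.Split(strings.Split(tag, "-")[1], ".sig")[0]

	return godigest.NewDigestFromEncoded(godigest.Algorithm(alg), encoded)
}

func main() {
	subject := godigest.FromString("some-manifest") // digest of the signed manifest
	tag := subject.Algorithm().String() + "-" + subject.Encoded() + ".sig"

	fmt.Println(tag)                                  // e.g. sha256-<hex>.sig
	fmt.Println(subjectFromCosignTag(tag) == subject) // true
}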
|
||||
|
@ -287,7 +287,18 @@ func (driver *Driver) Link(src, dest string) error {
		return err
	}

	return driver.formatErr(os.Link(src, dest))
	if err := os.Link(src, dest); err != nil {
		return driver.formatErr(err)
	}

	/* also update the modtime, so that gc won't remove recently linked blobs
	otherwise isBlobOlderThan(gcDelay) will return the modtime of the inode */
	currentTime := time.Now().Local()
	if err := os.Chtimes(dest, currentTime, currentTime); err != nil {
		return driver.formatErr(err)
	}

	return nil
}
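
This hunk is the dedupe fix described in the commit message: a hard link shares the original blob's inode, so without the Chtimes call a freshly deduped blob reports the original's (possibly very old) modification time and can be garbage collected immediately. A standalone sketch of the effect, using only the standard library:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

func main() {
	dir, _ := os.MkdirTemp("", "dedupe")
	defer os.RemoveAll(dir)

	orig := filepath.Join(dir, "blob-original")
	link := filepath.Join(dir, "blob-deduped")

	_ = os.WriteFile(orig, []byte("layer data"), 0o600)

	// Backdate the original so it looks older than any realistic GC delay.
	old := time.Now().Add(-48 * time.Hour)
	_ = os.Chtimes(orig, old, old)

	_ = os.Link(orig, link)

	fi, _ := os.Stat(link)
	fmt.Println("modtime before fix:", fi.ModTime()) // inherited from the shared inode: ~48h old

	now := time.Now().Local()
	_ = os.Chtimes(link, now, now) // what the patched Link() driver now does

	fi, _ = os.Stat(link)
	fmt.Println("modtime after fix:", fi.ModTime()) // fresh, so the GC delay applies again
}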
|
||||
|
||||
func (driver *Driver) formatErr(err error) error {
|
||||
|
@ -1,8 +1,6 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
zlog "zotregistry.io/zot/pkg/log"
|
||||
"zotregistry.io/zot/pkg/storage/cache"
|
||||
@ -13,17 +11,12 @@ import (
|
||||
|
||||
// NewImageStore returns a new image store backed by a file storage.
// Use the last argument to properly set a cache database, or it will default to boltDB local storage.
func NewImageStore(rootDir string, gc bool, gcReferrers bool, gcDelay time.Duration,
	untaggedImageRetentionDelay time.Duration, dedupe, commit bool,
	log zlog.Logger, metrics monitoring.MetricServer, linter common.Lint, cacheDriver cache.Cache,
func NewImageStore(rootDir string, dedupe, commit bool, log zlog.Logger,
	metrics monitoring.MetricServer, linter common.Lint, cacheDriver cache.Cache,
) storageTypes.ImageStore {
	return imagestore.NewImageStore(
		rootDir,
		rootDir,
		gc,
		gcReferrers,
		gcDelay,
		untaggedImageRetentionDelay,
		dedupe,
		commit,
		log,
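
With the GC knobs dropped from the constructor, creating a local store is now purely a storage concern and garbage collection is wired separately through the gc package. A hedged sketch mirroring the test setup elsewhere in this commit (the root directory and the nil linter are placeholders):

package main

import (
	"os"

	"github.com/rs/zerolog"

	"zotregistry.io/zot/pkg/extensions/monitoring"
	zlog "zotregistry.io/zot/pkg/log"
	"zotregistry.io/zot/pkg/storage"
	"zotregistry.io/zot/pkg/storage/cache"
	"zotregistry.io/zot/pkg/storage/local"
)

func main() {
	rootDir := "/tmp/zot" // placeholder root directory

	logger := zlog.Logger{Logger: zerolog.New(os.Stdout)}
	metrics := monitoring.NewMetricsServer(false, logger)

	// BoltDB-backed dedupe cache, as used by the tests in this commit.
	cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
		RootDir:     rootDir,
		Name:        "cache",
		UseRelPaths: true,
	}, logger)

	// New signature: rootDir, dedupe, commit, log, metrics, linter, cacheDriver.
	imgStore := local.NewImageStore(rootDir, true, true, logger, metrics, nil, cacheDriver)
	_ = imgStore
}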
@ -20,7 +20,6 @@ import (
|
||||
"zotregistry.io/zot/pkg/log"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/storage/cache"
|
||||
storageConstants "zotregistry.io/zot/pkg/storage/constants"
|
||||
"zotregistry.io/zot/pkg/storage/local"
|
||||
)
|
||||
|
||||
@ -36,8 +35,7 @@ func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
upload, err := imgStore.NewBlobUpload("dedupe1")
|
||||
So(err, ShouldBeNil)
|
||||
|
@ -34,6 +34,7 @@ import (
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/storage/cache"
|
||||
storageConstants "zotregistry.io/zot/pkg/storage/constants"
|
||||
"zotregistry.io/zot/pkg/storage/gc"
|
||||
"zotregistry.io/zot/pkg/storage/local"
|
||||
storageTypes "zotregistry.io/zot/pkg/storage/types"
|
||||
"zotregistry.io/zot/pkg/test"
|
||||
@ -69,8 +70,7 @@ func TestStorageFSAPIs(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
Convey("Repo layout", t, func(c C) {
|
||||
Convey("Bad image manifest", func() {
|
||||
@ -207,8 +207,7 @@ func TestGetOrasReferrers(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
Convey("Get referrers", t, func(c C) {
|
||||
err := test.WriteImageToFileSystem(CreateDefaultVulnerableImage(), "zot-test", "0.0.1", storage.StoreController{
|
||||
@ -267,8 +266,7 @@ func FuzzNewBlobUpload(f *testing.F) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
_, err := imgStore.NewBlobUpload(data)
|
||||
if err != nil {
|
||||
@ -293,8 +291,7 @@ func FuzzPutBlobChunk(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
repoName := data
|
||||
uuid, err := imgStore.NewBlobUpload(repoName)
|
||||
@ -327,8 +324,7 @@ func FuzzPutBlobChunkStreamed(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
repoName := data
|
||||
|
||||
@ -360,8 +356,7 @@ func FuzzGetBlobUpload(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil,
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil,
|
||||
cacheDriver)
|
||||
|
||||
_, err := imgStore.GetBlobUpload(data1, data2)
|
||||
@ -387,8 +382,7 @@ func FuzzTestPutGetImageManifest(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
|
||||
@ -439,8 +433,7 @@ func FuzzTestPutDeleteImageManifest(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
|
||||
@ -498,8 +491,7 @@ func FuzzTestDeleteImageManifest(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
|
||||
digest, _, err := newRandomBlobForFuzz(data)
|
||||
if err != nil {
|
||||
@ -534,8 +526,7 @@ func FuzzInitRepo(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
err := imgStore.InitRepo(data)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
@ -559,8 +550,7 @@ func FuzzInitValidateRepo(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
err := imgStore.InitRepo(data)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
@ -591,8 +581,7 @@ func FuzzGetImageTags(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
_, err := imgStore.GetImageTags(data)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrRepoNotFound) || isKnownErr(err) {
|
||||
@ -616,8 +605,7 @@ func FuzzBlobUploadPath(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
|
||||
_ = imgStore.BlobUploadPath(repo, uuid)
|
||||
})
|
||||
@ -636,8 +624,7 @@ func FuzzBlobUploadInfo(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
repo := data
|
||||
|
||||
_, err := imgStore.BlobUploadInfo(repo, uuid)
|
||||
@ -662,8 +649,7 @@ func FuzzTestGetImageManifest(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
repoName := data
|
||||
|
||||
@ -691,8 +677,7 @@ func FuzzFinishBlobUpload(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
repoName := data
|
||||
|
||||
@ -741,8 +726,7 @@ func FuzzFullBlobUpload(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
|
||||
ldigest, lblob, err := newRandomBlobForFuzz(data)
|
||||
if err != nil {
|
||||
@ -773,20 +757,18 @@ func TestStorageCacheErrors(t *testing.T) {
|
||||
|
||||
getBlobPath := ""
|
||||
|
||||
imgStore := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
true, true, log, metrics, nil, &mocks.CacheMock{
|
||||
PutBlobFn: func(digest godigest.Digest, path string) error {
|
||||
if strings.Contains(path, dedupedRepo) {
|
||||
return errCache
|
||||
}
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, &mocks.CacheMock{
|
||||
PutBlobFn: func(digest godigest.Digest, path string) error {
|
||||
if strings.Contains(path, dedupedRepo) {
|
||||
return errCache
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
GetBlobFn: func(digest godigest.Digest) (string, error) {
|
||||
return getBlobPath, nil
|
||||
},
|
||||
})
|
||||
return nil
|
||||
},
|
||||
GetBlobFn: func(digest godigest.Digest) (string, error) {
|
||||
return getBlobPath, nil
|
||||
},
|
||||
})
|
||||
|
||||
err := imgStore.InitRepo(originRepo)
|
||||
So(err, ShouldBeNil)
|
||||
@ -816,8 +798,7 @@ func FuzzDedupeBlob(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
|
||||
blobDigest := godigest.FromString(data)
|
||||
|
||||
@ -858,8 +839,7 @@ func FuzzDeleteBlobUpload(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
|
||||
uuid, err := imgStore.NewBlobUpload(repoName)
|
||||
if err != nil {
|
||||
@ -890,8 +870,7 @@ func FuzzBlobPath(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_ = imgStore.BlobPath(repoName, digest)
|
||||
@ -912,8 +891,7 @@ func FuzzCheckBlob(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
@ -944,8 +922,7 @@ func FuzzGetBlob(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
@ -983,8 +960,7 @@ func FuzzDeleteBlob(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
@ -1019,8 +995,7 @@ func FuzzGetIndexContent(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
@ -1055,8 +1030,7 @@ func FuzzGetBlobContent(f *testing.F) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
@ -1091,8 +1065,7 @@ func FuzzGetOrasReferrers(f *testing.F) {
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver)
|
||||
|
||||
storageCtlr := storage.StoreController{DefaultStore: imgStore}
|
||||
err := test.WriteImageToFileSystem(CreateDefaultVulnerableImage(), "zot-test", "0.0.1", storageCtlr)
|
||||
@ -1143,8 +1116,8 @@ func FuzzGetOrasReferrers(f *testing.F) {
|
||||
|
||||
func FuzzRunGCRepo(f *testing.F) {
|
||||
f.Fuzz(func(t *testing.T, data string) {
|
||||
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, *log)
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
@ -1152,11 +1125,16 @@ func FuzzRunGCRepo(f *testing.F) {
|
||||
RootDir: dir,
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, *log)
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, *log, metrics, nil, cacheDriver)
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
if err := imgStore.RunGCRepo(data); err != nil {
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
if err := gc.CleanRepo(data); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
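
The fuzz test above shows the replacement for the old RunGCRepo call: the collector is built from an image store plus a MetaDB so that manifest removals are mirrored into the metadata layer, and CleanRepo (or the periodic variant) drives it. A condensed sketch, reusing the imgStore, log, mocks and storageConstants names already in scope in this test:

// collector pairs the image store with a MetaDB so manifest deletions are synced.
collector := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
	Referrers:      true,                                                // also GC referrers with a missing subject
	Delay:          storageConstants.DefaultGCDelay,                     // grace period for unreferenced blobs
	RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,  // retention for untagged manifests
}, log)

// One-off cleanup of a single repository...
if err := collector.CleanRepo("some-repo"); err != nil {
	panic(err) // illustrative only; a real test would report via t.Error
}

// ...or schedule it, as the controller does, to run periodically:
// collector.CleanImageStorePeriodically(interval, taskScheduler)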
|
||||
@ -1193,11 +1171,9 @@ func TestDedupeLinks(t *testing.T) {
|
||||
var imgStore storageTypes.ImageStore
|
||||
|
||||
if testCase.dedupe {
|
||||
imgStore = local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, testCase.dedupe, true, log, metrics, nil, cacheDriver)
|
||||
imgStore = local.NewImageStore(dir, testCase.dedupe, true, log, metrics, nil, cacheDriver)
|
||||
} else {
|
||||
imgStore = local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, testCase.dedupe, true, log, metrics, nil, nil)
|
||||
imgStore = local.NewImageStore(dir, testCase.dedupe, true, log, metrics, nil, nil)
|
||||
}
|
||||
|
||||
// manifest1
|
||||
@ -1346,8 +1322,7 @@ func TestDedupeLinks(t *testing.T) {
|
||||
|
||||
Convey("test RunDedupeForDigest directly, trigger stat error on original blob", func() {
|
||||
// rebuild with dedupe true
|
||||
imgStore := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
duplicateBlobs := []string{
|
||||
path.Join(dir, "dedupe1", "blobs", "sha256", blobDigest1),
|
||||
@ -1366,8 +1341,7 @@ func TestDedupeLinks(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
taskScheduler, cancel := runAndGetScheduler()
|
||||
// rebuild with dedupe true
|
||||
imgStore := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
|
||||
sleepValue := i * 50
|
||||
@ -1379,8 +1353,7 @@ func TestDedupeLinks(t *testing.T) {
|
||||
taskScheduler, cancel := runAndGetScheduler()
|
||||
|
||||
// rebuild with dedupe true
|
||||
imgStore := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
|
||||
|
||||
// wait until rebuild finishes
|
||||
@ -1399,8 +1372,7 @@ func TestDedupeLinks(t *testing.T) {
|
||||
// switch dedupe to true from false
|
||||
taskScheduler, cancel := runAndGetScheduler()
|
||||
|
||||
imgStore := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, nil)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, nil)
|
||||
|
||||
// rebuild with dedupe true
|
||||
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
|
||||
@ -1422,16 +1394,14 @@ func TestDedupeLinks(t *testing.T) {
|
||||
// switch dedupe to true from false
|
||||
taskScheduler, cancel := runAndGetScheduler()
|
||||
|
||||
imgStore := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
true, true, log, metrics, nil, &mocks.CacheMock{
|
||||
HasBlobFn: func(digest godigest.Digest, path string) bool {
|
||||
return false
|
||||
},
|
||||
PutBlobFn: func(digest godigest.Digest, path string) error {
|
||||
return errCache
|
||||
},
|
||||
})
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, &mocks.CacheMock{
|
||||
HasBlobFn: func(digest godigest.Digest, path string) bool {
|
||||
return false
|
||||
},
|
||||
PutBlobFn: func(digest godigest.Digest, path string) error {
|
||||
return errCache
|
||||
},
|
||||
})
|
||||
// rebuild with dedupe true, should have samefile blobs
|
||||
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
|
||||
// wait until rebuild finishes
|
||||
@ -1452,20 +1422,18 @@ func TestDedupeLinks(t *testing.T) {
|
||||
// switch dedupe to true from false
|
||||
taskScheduler, cancel := runAndGetScheduler()
|
||||
|
||||
imgStore := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
true, true, log, metrics, nil, &mocks.CacheMock{
|
||||
HasBlobFn: func(digest godigest.Digest, path string) bool {
|
||||
return false
|
||||
},
|
||||
PutBlobFn: func(digest godigest.Digest, path string) error {
|
||||
if strings.Contains(path, "dedupe2") {
|
||||
return errCache
|
||||
}
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, &mocks.CacheMock{
|
||||
HasBlobFn: func(digest godigest.Digest, path string) bool {
|
||||
return false
|
||||
},
|
||||
PutBlobFn: func(digest godigest.Digest, path string) error {
|
||||
if strings.Contains(path, "dedupe2") {
|
||||
return errCache
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
})
|
||||
return nil
|
||||
},
|
||||
})
|
||||
// rebuild with dedupe true, should have samefile blobs
|
||||
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
|
||||
// wait until rebuild finishes
|
||||
@ -1549,8 +1517,7 @@ func TestDedupe(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
il := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
il := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
So(il.DedupeBlob("", "", ""), ShouldNotBeNil)
|
||||
})
|
||||
@ -1570,8 +1537,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
So(local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true,
|
||||
So(local.NewImageStore(dir, true,
|
||||
true, log, metrics, nil, cacheDriver), ShouldNotBeNil)
|
||||
if os.Geteuid() != 0 {
|
||||
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
|
||||
@ -1579,9 +1545,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
So(local.NewImageStore("/deadBEEF", true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true,
|
||||
true, log, metrics, nil, cacheDriver), ShouldBeNil)
|
||||
So(local.NewImageStore("/deadBEEF", true, true, log, metrics, nil, cacheDriver), ShouldBeNil)
|
||||
}
|
||||
})
|
||||
|
||||
@ -1596,8 +1560,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
err := os.Chmod(dir, 0o000) // remove all perms
|
||||
if err != nil {
|
||||
@ -1647,8 +1610,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
So(imgStore, ShouldNotBeNil)
|
||||
So(imgStore.InitRepo("test"), ShouldBeNil)
|
||||
@ -1760,8 +1722,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
So(imgStore, ShouldNotBeNil)
|
||||
So(imgStore.InitRepo("test"), ShouldBeNil)
|
||||
@ -1786,8 +1747,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
So(imgStore, ShouldNotBeNil)
|
||||
So(imgStore.InitRepo("test"), ShouldBeNil)
|
||||
@ -1834,8 +1794,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
So(imgStore, ShouldNotBeNil)
|
||||
So(imgStore.InitRepo("test"), ShouldBeNil)
|
||||
@ -2006,8 +1965,7 @@ func TestInjectWriteFile(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, false, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, false, log, metrics, nil, cacheDriver)
|
||||
|
||||
Convey("Failure path not reached", func() {
|
||||
err := imgStore.InitRepo("repo1")
|
||||
@ -2033,11 +1991,17 @@ func TestGarbageCollectForImageStore(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, 1*time.Second, storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
repoName := "gc-all-repos-short"
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: 1 * time.Second,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
image := CreateDefaultVulnerableImage()
|
||||
|
||||
err := test.WriteImageToFileSystem(image, repoName, "0.0.1", storage.StoreController{
|
||||
DefaultStore: imgStore,
|
||||
})
|
||||
@ -2049,7 +2013,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
@ -2073,11 +2037,17 @@ func TestGarbageCollectForImageStore(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, 1*time.Second, storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
repoName := "gc-all-repos-short"
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: 1 * time.Second,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
image := CreateDefaultVulnerableImage()
|
||||
|
||||
err := test.WriteImageToFileSystem(image, repoName, "0.0.1", storage.StoreController{
|
||||
DefaultStore: imgStore,
|
||||
})
|
||||
@ -2085,7 +2055,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
|
||||
|
||||
So(os.Chmod(path.Join(dir, repoName, "index.json"), 0o000), ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
@ -2109,10 +2079,15 @@ func TestGarbageCollectForImageStore(t *testing.T) {
|
||||
Name: "cache",
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
imgStore := local.NewImageStore(dir, true, true, 1*time.Second, storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
repoName := "gc-sig"
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: 1 * time.Second,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
storeController := storage.StoreController{DefaultStore: imgStore}
|
||||
img := CreateRandomImage()
|
||||
|
||||
@ -2152,7 +2127,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
|
||||
err = test.WriteImageToFileSystem(notationSig, repoName, "notation", storeController)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
})
|
||||
@ -2170,13 +2145,18 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, 1*time.Second,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
storeController := storage.StoreController{
|
||||
DefaultStore: imgStore,
|
||||
}
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: 1 * time.Second,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
unsupportedMediaType := "application/vnd.oci.artifact.manifest.v1+json"
|
||||
|
||||
img := CreateRandomImage()
|
||||
@ -2259,7 +2239,7 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) {
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
Convey("Garbage collect blobs referenced by manifest with unsupported media type", func() {
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, img.DigestStr())
|
||||
@ -2296,7 +2276,7 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, img.DigestStr(), true)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, img.DigestStr())
|
||||
@ -2343,10 +2323,15 @@ func TestGarbageCollectErrors(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, 500*time.Millisecond,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
repoName := "gc-index"
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: 500 * time.Millisecond,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
// create a blob/layer
|
||||
upload, err := imgStore.NewBlobUpload(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
@ -2433,7 +2418,7 @@ func TestGarbageCollectErrors(t *testing.T) {
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -2484,14 +2469,14 @@ func TestGarbageCollectErrors(t *testing.T) {
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
// trigger Unmarshal error
|
||||
_, err = os.Create(imgStore.BlobPath(repoName, digest))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -2541,7 +2526,7 @@ func TestGarbageCollectErrors(t *testing.T) {
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// blob shouldn't be gc'ed
|
||||
@ -2580,8 +2565,7 @@ func TestInitRepo(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
err := os.Mkdir(path.Join(dir, "test-dir"), 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
@ -2603,8 +2587,7 @@ func TestValidateRepo(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
err := os.Mkdir(path.Join(dir, "test-dir"), 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
@ -2624,8 +2607,7 @@ func TestValidateRepo(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
_, err := imgStore.ValidateRepo(".")
|
||||
So(err, ShouldNotBeNil)
|
||||
@ -2670,9 +2652,7 @@ func TestGetRepositories(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver,
|
||||
)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
// Create valid directory with permissions
|
||||
err := os.Mkdir(path.Join(dir, "test-dir"), 0o755) //nolint: gosec
|
||||
@ -2767,9 +2747,7 @@ func TestGetRepositories(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver,
|
||||
)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
// Root dir does not contain repos
|
||||
repos, err := imgStore.GetRepositories()
|
||||
@ -2815,8 +2793,7 @@ func TestGetRepositories(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(rootDir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
imgStore := local.NewImageStore(rootDir,
|
||||
true, true, log, metrics, nil, cacheDriver,
|
||||
)
|
||||
|
||||
@ -2860,9 +2837,7 @@ func TestGetNextRepository(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver,
|
||||
)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
firstRepoName := "repo1"
|
||||
secondRepoName := "repo2"
|
||||
|
||||
@ -2915,8 +2890,7 @@ func TestPutBlobChunkStreamed(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
uuid, err := imgStore.NewBlobUpload("test")
|
||||
So(err, ShouldBeNil)
|
||||
@ -2945,8 +2919,7 @@ func TestPullRange(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
repoName := "pull-range"
|
||||
|
||||
upload, err := imgStore.NewBlobUpload(repoName)
|
||||
@ -2994,8 +2967,7 @@ func TestStorageDriverErr(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
|
||||
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
|
||||
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
|
||||
|
||||
Convey("Init repo", t, func() {
|
||||
err := imgStore.InitRepo(repoName)
|
||||
|
@ -1,8 +1,6 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
// Add s3 support.
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
// Load s3 driver.
|
||||
@ -19,17 +17,12 @@ import (
|
||||
// NewObjectStorage returns a new image store backed by cloud storages.
// see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
// Use the last argument to properly set a cache database, or it will default to boltDB local storage.
func NewImageStore(rootDir string, cacheDir string, gc bool, gcReferrers bool, gcDelay time.Duration,
	untaggedImageRetentionDelay time.Duration, dedupe, commit bool, log zlog.Logger, metrics monitoring.MetricServer,
	linter common.Lint, store driver.StorageDriver, cacheDriver cache.Cache,
func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlog.Logger,
	metrics monitoring.MetricServer, linter common.Lint, store driver.StorageDriver, cacheDriver cache.Cache,
) storageTypes.ImageStore {
	return imagestore.NewImageStore(
		rootDir,
		cacheDir,
		gc,
		gcReferrers,
		gcDelay,
		untaggedImageRetentionDelay,
		dedupe,
		commit,
		log,
|
||||
|
@ -86,9 +86,7 @@ func createMockStorage(rootDir string, cacheDir string, dedupe bool, store drive
|
||||
}, log)
|
||||
}
|
||||
|
||||
il := s3.NewImageStore(rootDir, cacheDir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, dedupe, false, log, metrics, nil, store, cacheDriver,
|
||||
)
|
||||
il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver)
|
||||
|
||||
return il
|
||||
}
|
||||
@ -99,9 +97,7 @@ func createMockStorageWithMockCache(rootDir string, dedupe bool, store driver.St
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
|
||||
il := s3.NewImageStore(rootDir, "", true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, dedupe, false, log, metrics, nil, store, cacheDriver,
|
||||
)
|
||||
il := s3.NewImageStore(rootDir, "", dedupe, false, log, metrics, nil, store, cacheDriver)
|
||||
|
||||
return il
|
||||
}
|
||||
@ -161,8 +157,7 @@ func createObjectsStore(rootDir string, cacheDir string, dedupe bool) (
|
||||
}, log)
|
||||
}
|
||||
|
||||
il := s3.NewImageStore(rootDir, cacheDir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, dedupe, false, log, metrics, nil, store, cacheDriver)
|
||||
il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver)
|
||||
|
||||
return store, il, err
|
||||
}
|
||||
@ -196,8 +191,7 @@ func createObjectsStoreDynamo(rootDir string, cacheDir string, dedupe bool, tabl
|
||||
panic(err)
|
||||
}
|
||||
|
||||
il := s3.NewImageStore(rootDir, cacheDir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, dedupe, false, log, metrics, nil, store, cacheDriver)
|
||||
il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver)
|
||||
|
||||
return store, il, err
|
||||
}
|
||||
|
@@ -19,7 +19,6 @@ import (
    "zotregistry.io/zot/pkg/storage"
    "zotregistry.io/zot/pkg/storage/cache"
    common "zotregistry.io/zot/pkg/storage/common"
    storageConstants "zotregistry.io/zot/pkg/storage/constants"
    "zotregistry.io/zot/pkg/storage/local"
    "zotregistry.io/zot/pkg/test"
)
@@ -40,8 +39,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
        Name:        "cache",
        UseRelPaths: true,
    }, log)
    imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
        storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
    imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)

    Convey("Scrub only one repo", t, func(c C) {
        // initialize repo
|
||||
|
@@ -52,7 +52,6 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer
    //nolint:typecheck,contextcheck
    rootDir := config.Storage.RootDirectory
    defaultStore = local.NewImageStore(rootDir,
        config.Storage.GC, config.Storage.GCReferrers, config.Storage.GCDelay, config.Storage.UntaggedImageRetentionDelay,
        config.Storage.Dedupe, config.Storage.Commit, log, metrics, linter,
        CreateCacheDatabaseDriver(config.Storage.StorageConfig, log),
    )
@@ -80,9 +79,7 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer
    // false positive lint - linter does not implement Lint method
    //nolint: typecheck,contextcheck
    defaultStore = s3.NewImageStore(rootDir, config.Storage.RootDirectory,
        config.Storage.GC, config.Storage.GCReferrers, config.Storage.GCDelay,
        config.Storage.UntaggedImageRetentionDelay, config.Storage.Dedupe,
        config.Storage.Commit, log, metrics, linter, store,
        config.Storage.Dedupe, config.Storage.Commit, log, metrics, linter, store,
        CreateCacheDatabaseDriver(config.Storage.StorageConfig, log))
}

@@ -155,9 +152,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
    if isUnique {
        rootDir := storageConfig.RootDirectory
        imgStoreMap[storageConfig.RootDirectory] = local.NewImageStore(rootDir,
            storageConfig.GC, storageConfig.GCReferrers, storageConfig.GCDelay,
            storageConfig.UntaggedImageRetentionDelay, storageConfig.Dedupe,
            storageConfig.Commit, log, metrics, linter,
            storageConfig.Dedupe, storageConfig.Commit, log, metrics, linter,
            CreateCacheDatabaseDriver(storageConfig, log),
        )

@@ -188,9 +183,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
    // false positive lint - linter does not implement Lint method
    //nolint: typecheck
    subImageStore[route] = s3.NewImageStore(rootDir, storageConfig.RootDirectory,
        storageConfig.GC, storageConfig.GCReferrers, storageConfig.GCDelay,
        storageConfig.UntaggedImageRetentionDelay, storageConfig.Dedupe,
        storageConfig.Commit, log, metrics, linter, store,
        storageConfig.Dedupe, storageConfig.Commit, log, metrics, linter, store,
        CreateCacheDatabaseDriver(storageConfig, log),
    )
}
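The gc.Options literal used throughout these hunks is not defined in this diff; judging from the call sites, its shape is roughly the following (an inferred sketch, field names taken from usage, types assumed):

    type Options struct {
        // Referrers toggles garbage collection of referrer artifacts as well.
        Referrers bool
        // Delay is how long a blob or manifest must sit unreferenced before it is eligible for GC.
        Delay time.Duration
        // RetentionDelay is how long untagged manifests are kept before removal.
        RetentionDelay time.Duration
    }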
@@ -34,6 +34,7 @@ import (
    "zotregistry.io/zot/pkg/storage/cache"
    storageCommon "zotregistry.io/zot/pkg/storage/common"
    storageConstants "zotregistry.io/zot/pkg/storage/constants"
    "zotregistry.io/zot/pkg/storage/gc"
    "zotregistry.io/zot/pkg/storage/imagestore"
    "zotregistry.io/zot/pkg/storage/local"
    "zotregistry.io/zot/pkg/storage/s3"
@@ -54,7 +55,7 @@ func skipIt(t *testing.T) {
    }
}

func createObjectsStore(rootDir string, cacheDir string, gcDelay time.Duration, imageRetention time.Duration) (
func createObjectsStore(rootDir string, cacheDir string) (
    driver.StorageDriver, storageTypes.ImageStore, error,
) {
    bucket := "zot-storage-test"
@@ -93,9 +94,7 @@ func createObjectsStore(rootDir string, cacheDir string, gcDelay time.Duration,
        UseRelPaths: false,
    }, log)

    il := s3.NewImageStore(rootDir, cacheDir, true, true, gcDelay, imageRetention,
        true, false, log, metrics, nil, store, cacheDriver,
    )
    il := s3.NewImageStore(rootDir, cacheDir, true, false, log, metrics, nil, store, cacheDriver)

    return store, il, err
}
|
||||
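With gcDelay and imageRetention gone from createObjectsStore, a test that needs custom GC timing now sets it on the collector instead of on the store; a rough sketch in the same test style (the one-second delays and repoName are placeholders):

    store, imgStore, err := createObjectsStore(testDir, t.TempDir())
    So(err, ShouldBeNil)
    defer cleanupStorage(store, testDir)

    gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
        Referrers:      true,
        Delay:          1 * time.Second,
        RetentionDelay: 1 * time.Second,
    }, log)
    So(gc.CleanRepo(repoName), ShouldBeNil)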
@ -132,8 +131,7 @@ func TestStorageAPIs(t *testing.T) {
|
||||
tdir := t.TempDir()
|
||||
|
||||
var store driver.StorageDriver
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir)
|
||||
defer cleanupStorage(store, testDir)
|
||||
} else {
|
||||
dir := t.TempDir()
|
||||
@ -148,8 +146,7 @@ func TestStorageAPIs(t *testing.T) {
|
||||
|
||||
driver := local.New(true)
|
||||
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
}
|
||||
|
||||
Convey("Repo layout", t, func(c C) {
|
||||
@ -765,11 +762,9 @@ func TestMandatoryAnnotations(t *testing.T) {
|
||||
testDir = path.Join("/oci-repo-test", uuid.String())
|
||||
tdir = t.TempDir()
|
||||
|
||||
store, _, _ = createObjectsStore(testDir, tdir, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
store, _, _ = createObjectsStore(testDir, tdir)
|
||||
driver := s3.New(store)
|
||||
imgStore = imagestore.NewImageStore(testDir, tdir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics,
|
||||
imgStore = imagestore.NewImageStore(testDir, tdir, false, false, log, metrics,
|
||||
&mocks.MockedLint{
|
||||
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storageTypes.ImageStore) (bool, error) {
|
||||
return false, nil
|
||||
@ -785,8 +780,7 @@ func TestMandatoryAnnotations(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
driver := local.New(true)
|
||||
imgStore = imagestore.NewImageStore(tdir, tdir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true,
|
||||
imgStore = imagestore.NewImageStore(tdir, tdir, true,
|
||||
true, log, metrics, &mocks.MockedLint{
|
||||
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storageTypes.ImageStore) (bool, error) {
|
||||
return false, nil
|
||||
@ -839,8 +833,7 @@ func TestMandatoryAnnotations(t *testing.T) {
|
||||
Convey("Error on mandatory annotations", func() {
|
||||
if testcase.storageType == storageConstants.S3StorageDriverName {
|
||||
driver := s3.New(store)
|
||||
imgStore = imagestore.NewImageStore(testDir, tdir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics,
|
||||
imgStore = imagestore.NewImageStore(testDir, tdir, false, false, log, metrics,
|
||||
&mocks.MockedLint{
|
||||
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storageTypes.ImageStore) (bool, error) {
|
||||
//nolint: goerr113
|
||||
@ -854,8 +847,7 @@ func TestMandatoryAnnotations(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
driver := local.New(true)
|
||||
imgStore = imagestore.NewImageStore(tdir, tdir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true,
|
||||
imgStore = imagestore.NewImageStore(tdir, tdir, true,
|
||||
true, log, metrics, &mocks.MockedLint{
|
||||
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storageTypes.ImageStore) (bool, error) {
|
||||
//nolint: goerr113
|
||||
@ -894,8 +886,7 @@ func TestDeleteBlobsInUse(t *testing.T) {
|
||||
testDir = path.Join("/oci-repo-test", uuid.String())
|
||||
tdir = t.TempDir()
|
||||
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir)
|
||||
|
||||
defer cleanupStorage(store, testDir)
|
||||
} else {
|
||||
@ -906,8 +897,7 @@ func TestDeleteBlobsInUse(t *testing.T) {
|
||||
UseRelPaths: true,
|
||||
}, log)
|
||||
driver := local.New(true)
|
||||
imgStore = imagestore.NewImageStore(tdir, tdir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true,
|
||||
imgStore = imagestore.NewImageStore(tdir, tdir, true,
|
||||
true, log, metrics, nil, driver, cacheDriver)
|
||||
}
|
||||
|
||||
@ -1191,18 +1181,15 @@ func TestStorageHandler(t *testing.T) {
|
||||
var thirdStorageDriver driver.StorageDriver
|
||||
|
||||
firstRootDir = "/util_test1"
|
||||
firstStorageDriver, firstStore, _ = createObjectsStore(firstRootDir, t.TempDir(),
|
||||
storageConstants.DefaultGCDelay, storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
firstStorageDriver, firstStore, _ = createObjectsStore(firstRootDir, t.TempDir())
|
||||
defer cleanupStorage(firstStorageDriver, firstRootDir)
|
||||
|
||||
secondRootDir = "/util_test2"
|
||||
secondStorageDriver, secondStore, _ = createObjectsStore(secondRootDir, t.TempDir(),
|
||||
storageConstants.DefaultGCDelay, storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
secondStorageDriver, secondStore, _ = createObjectsStore(secondRootDir, t.TempDir())
|
||||
defer cleanupStorage(secondStorageDriver, secondRootDir)
|
||||
|
||||
thirdRootDir = "/util_test3"
|
||||
thirdStorageDriver, thirdStore, _ = createObjectsStore(thirdRootDir, t.TempDir(),
|
||||
storageConstants.DefaultGCDelay, storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
thirdStorageDriver, thirdStore, _ = createObjectsStore(thirdRootDir, t.TempDir())
|
||||
defer cleanupStorage(thirdStorageDriver, thirdRootDir)
|
||||
} else {
|
||||
// Create temporary directory
|
||||
@ -1217,14 +1204,11 @@ func TestStorageHandler(t *testing.T) {
|
||||
driver := local.New(true)
|
||||
|
||||
// Create ImageStore
|
||||
firstStore = imagestore.NewImageStore(firstRootDir, firstRootDir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, driver, nil)
|
||||
firstStore = imagestore.NewImageStore(firstRootDir, firstRootDir, false, false, log, metrics, nil, driver, nil)
|
||||
|
||||
secondStore = imagestore.NewImageStore(secondRootDir, secondRootDir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, driver, nil)
|
||||
secondStore = imagestore.NewImageStore(secondRootDir, secondRootDir, false, false, log, metrics, nil, driver, nil)
|
||||
|
||||
thirdStore = imagestore.NewImageStore(thirdRootDir, thirdRootDir, false, false, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, driver, nil)
|
||||
thirdStore = imagestore.NewImageStore(thirdRootDir, thirdRootDir, false, false, log, metrics, nil, driver, nil)
|
||||
}
|
||||
|
||||
Convey("Test storage handler", t, func() {
|
||||
@@ -1290,8 +1274,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
            tdir := t.TempDir()

            var store driver.StorageDriver
            store, imgStore, _ = createObjectsStore(testDir, tdir, storageConstants.DefaultGCDelay,
                storageConstants.DefaultUntaggedImgeRetentionDelay)
            store, imgStore, _ = createObjectsStore(testDir, tdir)
            defer cleanupStorage(store, testDir)
        } else {
            dir := t.TempDir()
@@ -1304,10 +1287,15 @@ func TestGarbageCollectImageManifest(t *testing.T) {

            driver := local.New(true)

            imgStore = imagestore.NewImageStore(dir, dir, true, true, storageConstants.DefaultGCDelay,
                storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, driver, cacheDriver)
            imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver)
        }

        gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
            Referrers:      true,
            Delay:          storageConstants.DefaultGCDelay,
            RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
        }, log)
|
||||
|
||||
repoName := "gc-long"
|
||||
|
||||
upload, err := imgStore.NewBlobUpload(repoName)
|
||||
@ -1361,7 +1349,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
_, _, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, manifestBuf)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// put artifact referencing above image
|
||||
@ -1405,7 +1393,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
ispec.MediaTypeImageManifest, artifactManifestBuf)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest)
|
||||
@ -1419,7 +1407,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, digest.String(), false)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest)
|
||||
@ -1448,8 +1436,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
tdir := t.TempDir()
|
||||
|
||||
var store driver.StorageDriver
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir, gcDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir)
|
||||
defer cleanupStorage(store, testDir)
|
||||
} else {
|
||||
dir := t.TempDir()
|
||||
@ -1462,11 +1449,16 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
|
||||
driver := local.New(true)
|
||||
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, gcDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true,
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true,
|
||||
true, log, metrics, nil, driver, cacheDriver)
|
||||
}
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: gcDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
// upload orphan blob
|
||||
upload, err := imgStore.NewBlobUpload(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
@ -1622,7 +1614,13 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
Digest: digest,
|
||||
Size: int64(len(manifestBuf)),
|
||||
}
|
||||
orasArtifactManifest.Blobs = []artifactspec.Descriptor{}
|
||||
orasArtifactManifest.Blobs = []artifactspec.Descriptor{
|
||||
{
|
||||
Digest: artifactBlobDigest,
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Size: int64(len(artifactBlob)),
|
||||
},
|
||||
}
|
||||
|
||||
orasArtifactManifestBuf, err := json.Marshal(orasArtifactManifest)
|
||||
So(err, ShouldBeNil)
|
||||
@ -1637,7 +1635,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, orasDigest.String())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, odigest)
|
||||
@ -1663,7 +1661,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, digest.String(), false)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest)
|
||||
@ -1700,7 +1698,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, tag, false)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest)
|
||||
@ -1749,8 +1747,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
tdir := t.TempDir()
|
||||
|
||||
var store driver.StorageDriver
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir, gcDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir)
|
||||
defer cleanupStorage(store, testDir)
|
||||
} else {
|
||||
dir := t.TempDir()
|
||||
@ -1763,10 +1760,15 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
|
||||
driver := local.New(true)
|
||||
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, gcDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
}
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: gcDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
// first upload an image to the first repo and wait for GC timeout
|
||||
|
||||
repo1Name := "gc1"
|
||||
@ -1943,7 +1945,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
|
||||
_, _, err = imgStore.PutImageManifest(repo2Name, tag, ispec.MediaTypeImageManifest, manifestBuf)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repo2Name)
|
||||
err = gc.CleanRepo(repo2Name)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// original blob should exist
|
||||
@ -1981,8 +1983,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
tdir := t.TempDir()
|
||||
|
||||
var store driver.StorageDriver
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay)
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir)
|
||||
defer cleanupStorage(store, testDir)
|
||||
} else {
|
||||
dir := t.TempDir()
|
||||
@ -1995,10 +1996,15 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
|
||||
driver := local.New(true)
|
||||
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, storageConstants.DefaultGCDelay,
|
||||
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
}
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: storageConstants.DefaultGCDelay,
|
||||
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
|
||||
}, log)
|
||||
|
||||
repoName := "gc-long"
|
||||
|
||||
bdgst, digest, indexDigest, indexSize := pushRandomImageIndex(imgStore, repoName)
|
||||
@ -2060,7 +2066,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
ispec.MediaTypeImageManifest, artifactManifestBuf)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err := imgStore.CheckBlob(repoName, bdgst)
|
||||
@ -2071,7 +2077,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, indexDigest.String(), false)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdgst)
|
||||
@ -2107,7 +2113,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
tdir := t.TempDir()
|
||||
|
||||
var store driver.StorageDriver
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir, gcDelay, imageRetentionDelay)
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir)
|
||||
defer cleanupStorage(store, testDir)
|
||||
} else {
|
||||
dir := t.TempDir()
|
||||
@ -2120,10 +2126,15 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
|
||||
driver := local.New(true)
|
||||
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, gcDelay,
|
||||
imageRetentionDelay, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
}
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: gcDelay,
|
||||
RetentionDelay: imageRetentionDelay,
|
||||
}, log)
|
||||
|
||||
// upload orphan blob
|
||||
upload, err := imgStore.NewBlobUpload(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
@ -2260,7 +2271,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, orasDigest.String())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, orasDigest.String())
|
||||
@ -2281,7 +2292,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
Convey("delete inner referenced manifest", func() {
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// check orphan artifact is gc'ed
|
||||
@ -2300,7 +2311,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, artifactDigest.String(), false)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactOfArtifactManifestDigest.String())
|
||||
@ -2314,7 +2325,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
})
|
||||
|
||||
Convey("delete index manifest, references should not be persisted", func() {
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// check orphan artifact is gc'ed
|
||||
@ -2333,7 +2344,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, indexDigest.String(), false)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactDigest.String())
|
||||
@ -2345,10 +2356,8 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactOfArtifactManifestDigest.String())
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
// isn't yet GC'ed because manifests that are part of the index are removed after gcReferrers,
// so the artifacts pointing to manifests which are part of the index are not removed after a first gcRepo
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactManifestIndexDigest.String())
|
||||
So(err, ShouldBeNil)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
// orphan blob
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, odigest)
|
||||
@ -2372,10 +2381,6 @@ func TestGarbageCollectImageIndex(t *testing.T) {
|
||||
_, _, _, err := imgStore.GetImageManifest(repoName, artifactDigest.String())
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
// this will remove referrers of manifests that are part of the image index
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactManifestIndexDigest.String())
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
@ -2418,7 +2423,7 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
tdir := t.TempDir()
|
||||
|
||||
var store driver.StorageDriver
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir, gcDelay, imageRetentionDelay)
|
||||
store, imgStore, _ = createObjectsStore(testDir, tdir)
|
||||
defer cleanupStorage(store, testDir)
|
||||
} else {
|
||||
dir := t.TempDir()
|
||||
@ -2431,10 +2436,15 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
|
||||
driver := local.New(true)
|
||||
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, gcDelay,
|
||||
imageRetentionDelay, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver)
|
||||
}
|
||||
|
||||
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
|
||||
Referrers: true,
|
||||
Delay: gcDelay,
|
||||
RetentionDelay: imageRetentionDelay,
|
||||
}, log)
|
||||
|
||||
// upload orphan blob
|
||||
upload, err := imgStore.NewBlobUpload(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
@ -2754,7 +2764,7 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, orasDigest.String())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, orasDigest.String())
|
||||
@ -2775,7 +2785,7 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
Convey("delete inner referenced manifest", func() {
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// check orphan artifact is gc'ed
|
||||
@ -2794,7 +2804,7 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, artifactDigest.String(), false)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactOfArtifactManifestDigest.String())
|
||||
@ -2808,7 +2818,7 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
})
|
||||
|
||||
Convey("delete index manifest, references should not be persisted", func() {
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// check orphan artifact is gc'ed
|
||||
@ -2827,9 +2837,7 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, indexDigest.String(), false)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// this will remove artifacts pointing to the root index which was removed
// it will also remove the inner index because, although it is still referenced in index.json, it has no tag
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
err = gc.CleanRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactDigest.String())
|
||||
@ -2841,11 +2849,6 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactOfArtifactManifestDigest.String())
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
// isn't yet GC'ed because manifests that are part of the index are removed after gcReferrers,
// so the artifacts pointing to manifests which are part of the index are not removed after a single gcRepo
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactManifestIndexDigest.String())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// orphan blob
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, odigest)
|
||||
So(err, ShouldNotBeNil)
|
||||
@ -2859,20 +2862,10 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
|
||||
_, _, _, err := imgStore.GetImageManifest(repoName, artifactDigest.String())
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
// this will remove manifests referenced in the inner index because, even though they are referenced in index.json,
// they do not have tags
// it will also remove referrers pointing to the inner manifest
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// check inner index artifact is gc'ed
|
||||
_, _, _, err = imgStore.GetImageManifest(repoName, artifactManifestInnerIndexDigest.String())
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
// this will remove referrers pointing to manifests referenced in inner index
|
||||
err = imgStore.RunGCRepo(repoName)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// check last manifest from index image
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, digest)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
@@ -44,12 +44,12 @@ type ImageStore interface { //nolint:interfacebloat
    GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,
    ) (io.ReadCloser, int64, int64, error)
    DeleteBlob(repo string, digest godigest.Digest) error
    CleanupRepo(repo string, blobs []godigest.Digest, removeRepo bool) (int, error)
    GetIndexContent(repo string) ([]byte, error)
    PutIndexContent(repo string, index ispec.Index) error
    GetBlobContent(repo string, digest godigest.Digest) ([]byte, error)
    GetReferrers(repo string, digest godigest.Digest, artifactTypes []string) (ispec.Index, error)
    GetOrasReferrers(repo string, digest godigest.Digest, artifactType string) ([]artifactspec.Descriptor, error)
    RunGCRepo(repo string) error
    RunGCPeriodically(interval time.Duration, sch *scheduler.Scheduler)
    RunDedupeBlobs(interval time.Duration, sch *scheduler.Scheduler)
    RunDedupeForDigest(digest godigest.Digest, dedupe bool, duplicateBlobs []string) error
    GetNextDigestWithBlobPaths(lastDigests []godigest.Digest) (godigest.Digest, []string, error)
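With RunGCRepo and RunGCPeriodically dropped from the interface, CleanupRepo is the only GC-related primitive a store still exposes, and collection itself is driven from the gc package; a before/after sketch (identifiers not shown in the diff are placeholders):

    // before: GC was a method on the store
    err := imgStore.RunGCRepo(repoName)

    // after: GC is an external collector that calls back into the store (CleanupRepo) and the metaDB
    collector := gc.NewGarbageCollect(imgStore, metaDB, opts, log)
    err = collector.CleanRepo(repoName)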
|
||||
|
@ -1574,7 +1574,7 @@ func CustomRedirectPolicy(noOfRedirect int) resty.RedirectPolicy {
|
||||
}
|
||||
|
||||
func GetDefaultImageStore(rootDir string, log zLog.Logger) stypes.ImageStore {
|
||||
return local.NewImageStore(rootDir, false, false, time.Hour, time.Hour, false, false, log,
|
||||
return local.NewImageStore(rootDir, false, false, log,
|
||||
monitoring.NewMetricsServer(false, log),
|
||||
mocks.MockedLint{
|
||||
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore stypes.ImageStore) (bool, error) {
|
||||
|
@@ -52,6 +52,8 @@ type MockedImageStore struct {
    RunDedupeForDigestFn         func(digest godigest.Digest, dedupe bool, duplicateBlobs []string) error
    GetNextDigestWithBlobPathsFn func(lastDigests []godigest.Digest) (godigest.Digest, []string, error)
    GetAllBlobsFn                func(repo string) ([]string, error)
    CleanupRepoFn                func(repo string, blobs []godigest.Digest, removeRepo bool) (int, error)
    PutIndexContentFn            func(repo string, index ispec.Index) error
}

func (is MockedImageStore) Lock(t *time.Time) {
@@ -378,3 +380,19 @@ func (is MockedImageStore) GetNextDigestWithBlobPaths(lastDigests []godigest.Dig

    return "", []string{}, nil
}

func (is MockedImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRepo bool) (int, error) {
    if is.CleanupRepoFn != nil {
        return is.CleanupRepoFn(repo, blobs, removeRepo)
    }

    return 0, nil
}

func (is MockedImageStore) PutIndexContent(repo string, index ispec.Index) error {
    if is.PutIndexContentFn != nil {
        return is.PutIndexContentFn(repo, index)
    }

    return nil
}
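The new CleanupRepoFn hook makes it possible to exercise the gc package against a fake store; for example, a test could record which blobs the collector asks the store to delete (a sketch, not taken from the test suite):

    var deleted []godigest.Digest
    imgStore := mocks.MockedImageStore{
        CleanupRepoFn: func(repo string, blobs []godigest.Digest, removeRepo bool) (int, error) {
            deleted = append(deleted, blobs...)
            return len(blobs), nil
        },
    }
    // pass imgStore to gc.NewGarbageCollect and assert on `deleted` after CleanRepo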
|
||||
|
@@ -21,6 +21,8 @@ type MetaDBMock struct {

    SetRepoReferenceFn func(repo string, Reference string, manifestDigest godigest.Digest, mediaType string) error

    RemoveRepoReferenceFn func(repo, reference string, manifestDigest godigest.Digest) error

    DeleteRepoTagFn func(repo string, tag string) error

    GetRepoMetaFn func(repo string) (mTypes.RepoMetadata, error)
@@ -168,6 +170,14 @@ func (sdm MetaDBMock) SetRepoReference(repo string, reference string, manifestDi
    return nil
}

func (sdm MetaDBMock) RemoveRepoReference(repo, reference string, manifestDigest godigest.Digest) error {
    if sdm.RemoveRepoReferenceFn != nil {
        return sdm.RemoveRepoReferenceFn(repo, reference, manifestDigest)
    }

    return nil
}

func (sdm MetaDBMock) DeleteRepoTag(repo string, tag string) error {
    if sdm.DeleteRepoTagFn != nil {
        return sdm.DeleteRepoTagFn(repo, tag)
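RemoveRepoReferenceFn is the hook that lets tests observe the repodb side of GC, i.e. that removing a manifest from storage also drops its reference from the metadata DB; a sketch of such a stub (assumed usage):

    var dropped []string
    metaDB := mocks.MetaDBMock{
        RemoveRepoReferenceFn: func(repo, reference string, manifestDigest godigest.Digest) error {
            dropped = append(dropped, reference)
            return nil
        },
    }
    // hand metaDB to gc.NewGarbageCollect and check `dropped` after the collector runs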
|
||||
|
@@ -347,7 +347,7 @@ func TestExtractImageDetails(t *testing.T) {
    Convey("extractImageDetails good workflow", t, func() {
        dir := t.TempDir()
        testLogger := log.NewLogger("debug", "")
        imageStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
        imageStore := local.NewImageStore(dir, false, false,
            testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil)

        storeController := storage.StoreController{
@@ -383,7 +383,7 @@ func TestExtractImageDetails(t *testing.T) {
    Convey("extractImageDetails bad ispec.ImageManifest", t, func() {
        dir := t.TempDir()
        testLogger := log.NewLogger("debug", "")
        imageStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
        imageStore := local.NewImageStore(dir, false, false,
            testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil)

        storeController := storage.StoreController{
@@ -403,7 +403,7 @@ func TestExtractImageDetails(t *testing.T) {
    Convey("extractImageDetails bad imageConfig", t, func() {
        dir := t.TempDir()
        testLogger := log.NewLogger("debug", "")
        imageStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
        imageStore := local.NewImageStore(dir, false, false,
            testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil)

        storeController := storage.StoreController{