Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>

parent 67294cc669
commit 8237f8d20a
@@ -26,6 +26,7 @@ import (
 	"zotregistry.io/zot/pkg/log"
 	"zotregistry.io/zot/pkg/scheduler"
 	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	"zotregistry.io/zot/pkg/storage/s3"
 )

@@ -251,7 +252,7 @@ func (c *Controller) InitImageStore(reloadCtx context.Context) error {
 	if c.Config.Storage.RootDirectory != "" {
 		// no need to validate hard links work on s3
 		if c.Config.Storage.Dedupe && c.Config.Storage.StorageDriver == nil {
-			err := storage.ValidateHardLink(c.Config.Storage.RootDirectory)
+			err := local.ValidateHardLink(c.Config.Storage.RootDirectory)
 			if err != nil {
 				c.Log.Warn().Msg("input storage root directory filesystem does not supports hardlinking," +
 					"disabling dedupe functionality")

@@ -264,7 +265,7 @@ func (c *Controller) InitImageStore(reloadCtx context.Context) error {
 	if c.Config.Storage.StorageDriver == nil {
 		// false positive lint - linter does not implement Lint method
 		// nolint: typecheck
-		defaultStore = storage.NewImageStore(c.Config.Storage.RootDirectory,
+		defaultStore = local.NewImageStore(c.Config.Storage.RootDirectory,
 			c.Config.Storage.GC, c.Config.Storage.GCDelay,
 			c.Config.Storage.Dedupe, c.Config.Storage.Commit, c.Log, c.Metrics, linter,
 		)

@@ -335,7 +336,7 @@ func (c *Controller) getSubStore(subPaths map[string]config.StorageConfig,
 	for route, storageConfig := range subPaths {
 		// no need to validate hard links work on s3
 		if storageConfig.Dedupe && storageConfig.StorageDriver == nil {
-			err := storage.ValidateHardLink(storageConfig.RootDirectory)
+			err := local.ValidateHardLink(storageConfig.RootDirectory)
 			if err != nil {
 				c.Log.Warn().Msg("input storage root directory filesystem does not supports hardlinking, " +
 					"disabling dedupe functionality")

@@ -370,7 +371,7 @@ func (c *Controller) getSubStore(subPaths map[string]config.StorageConfig,
 	// add it to uniqueSubFiles
 	// Create a new image store and assign it to imgStoreMap
 	if isUnique {
-		imgStoreMap[storageConfig.RootDirectory] = storage.NewImageStore(storageConfig.RootDirectory,
+		imgStoreMap[storageConfig.RootDirectory] = local.NewImageStore(storageConfig.RootDirectory,
 			storageConfig.GC, storageConfig.GCDelay, storageConfig.Dedupe, storageConfig.Commit, c.Log, c.Metrics, linter)

 	subImageStore[route] = imgStoreMap[storageConfig.RootDirectory]

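For downstream code that built the default image store directly, the same wiring now goes through the local subpackage. The sketch below is illustrative only (the root directory, nil linter, and logger/metrics setup are placeholders, not values from this commit); it mirrors the argument order used in the hunks above: root directory, GC flag, GC delay, dedupe, commit, logger, metrics, linter.

	// Illustrative wiring only; "/tmp/zot" and the nil linter are placeholders.
	logger := log.NewLogger("debug", "")
	metrics := monitoring.NewMetricsServer(false, logger)

	store := local.NewImageStore("/tmp/zot", true, storage.DefaultGCDelay,
		true, false, logger, metrics, nil)

	storeController := storage.StoreController{DefaultStore: store}
	_ = storeController
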
@@ -49,6 +49,7 @@ import (
 	"zotregistry.io/zot/pkg/api/constants"
 	extconf "zotregistry.io/zot/pkg/extensions/config"
 	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	"zotregistry.io/zot/pkg/test"
 )

@@ -5420,7 +5421,7 @@ func TestManifestImageIndex(t *testing.T) {

 		Convey("Corrupt index", func() {
 			err = os.WriteFile(path.Join(dir, "index", "blobs", index1dgst.Algorithm().String(), index1dgst.Encoded()),
-				[]byte("deadbeef"), storage.DefaultFilePerms)
+				[]byte("deadbeef"), local.DefaultFilePerms)
 			So(err, ShouldBeNil)
 			resp, err = resty.R().Delete(baseURL + fmt.Sprintf("/v2/index/manifests/%s", index1dgst))
 			So(err, ShouldBeNil)

@@ -6289,7 +6290,7 @@ func TestDistSpecExtensions(t *testing.T) {
 func getAllBlobs(imagePath string) []string {
 	blobList := make([]string, 0)

-	if !storage.DirExists(imagePath) {
+	if !local.DirExists(imagePath) {
 		return []string{}
 	}

@@ -6334,7 +6335,7 @@ func getAllBlobs(imagePath string) []string {
 func getAllManifests(imagePath string) []string {
 	manifestList := make([]string, 0)

-	if !storage.DirExists(imagePath) {
+	if !local.DirExists(imagePath) {
 		return []string{}
 	}

@@ -22,7 +22,7 @@ import (
 	"time"

 	zotErrors "zotregistry.io/zot/errors"
-	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 )

 var (

@@ -160,7 +160,7 @@ func loadPerHostCerts(caCertPool *x509.CertPool, host string) *tls.Config {
 	home := os.Getenv("HOME")
 	clientCertsDir := filepath.Join(home, homeCertsDir, host)

-	if storage.DirExists(clientCertsDir) {
+	if local.DirExists(clientCertsDir) {
 		tlsConfig, err := getTLSConfig(clientCertsDir, caCertPool)

 		if err == nil {

@@ -170,7 +170,7 @@ func loadPerHostCerts(caCertPool *x509.CertPool, host string) *tls.Config {

 	// Check if the /etc/containers/certs.d/$IP:$PORT dir exists
 	clientCertsDir = filepath.Join(certsPath, host)
-	if storage.DirExists(clientCertsDir) {
+	if local.DirExists(clientCertsDir) {
 		tlsConfig, err := getTLSConfig(clientCertsDir, caCertPool)

 		if err == nil {

@@ -22,7 +22,7 @@ import (
 	"zotregistry.io/zot/pkg/extensions/lint"
 	"zotregistry.io/zot/pkg/extensions/monitoring"
 	"zotregistry.io/zot/pkg/log"
-	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	"zotregistry.io/zot/pkg/test"
 )

@@ -493,7 +493,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 		var index ispec.Index

 		linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
-		imgStore := storage.NewImageStore(dir, false, 0, false, false,
+		imgStore := local.NewImageStore(dir, false, 0, false, false,
 			log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)

 		indexContent, err := imgStore.GetIndexContent("zot-test")

@@ -526,7 +526,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 		var index ispec.Index

 		linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
-		imgStore := storage.NewImageStore(dir, false, 0, false, false,
+		imgStore := local.NewImageStore(dir, false, 0, false, false,
 			log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)

 		indexContent, err := imgStore.GetIndexContent("zot-test")

@@ -597,7 +597,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 		index.Manifests = append(index.Manifests, manifestDesc)

 		linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
-		imgStore := storage.NewImageStore(dir, false, 0, false, false,
+		imgStore := local.NewImageStore(dir, false, 0, false, false,
 			log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)

 		pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)

@@ -660,7 +660,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 		index.Manifests = append(index.Manifests, manifestDesc)

 		linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
-		imgStore := storage.NewImageStore(dir, false, 0, false, false,
+		imgStore := local.NewImageStore(dir, false, 0, false, false,
 			log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)

 		pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)

@@ -725,7 +725,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 		index.Manifests = append(index.Manifests, manifestDesc)

 		linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
-		imgStore := storage.NewImageStore(dir, false, 0, false, false,
+		imgStore := local.NewImageStore(dir, false, 0, false, false,
 			log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)

 		pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)

@@ -789,7 +789,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 		index.Manifests = append(index.Manifests, manifestDesc)

 		linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
-		imgStore := storage.NewImageStore(dir, false, 0, false, false,
+		imgStore := local.NewImageStore(dir, false, 0, false, false,
 			log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)

 		err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o000)

@@ -888,7 +888,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 		index.Manifests = append(index.Manifests, manifestDesc)

 		linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
-		imgStore := storage.NewImageStore(dir, false, 0, false, false,
+		imgStore := local.NewImageStore(dir, false, 0, false, false,
 			log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)

 		err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Hex()), 0o000)

@@ -20,7 +20,7 @@ import (
 	"zotregistry.io/zot/pkg/extensions/monitoring"
 	"zotregistry.io/zot/pkg/extensions/scrub"
 	"zotregistry.io/zot/pkg/log"
-	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	"zotregistry.io/zot/pkg/test"
 )

@@ -238,7 +238,7 @@ func TestRunScrubRepo(t *testing.T) {
 		dir := t.TempDir()
 		log := log.NewLogger("debug", logFile.Name())
 		metrics := monitoring.NewMetricsServer(false, log)
-		imgStore := storage.NewImageStore(dir, true, 1*time.Second, true,
+		imgStore := local.NewImageStore(dir, true, 1*time.Second, true,
 			true, log, metrics, nil)

 		err = test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))

@@ -268,7 +268,7 @@ func TestRunScrubRepo(t *testing.T) {
 		dir := t.TempDir()
 		log := log.NewLogger("debug", logFile.Name())
 		metrics := monitoring.NewMetricsServer(false, log)
-		imgStore := storage.NewImageStore(dir, true, 1*time.Second, true,
+		imgStore := local.NewImageStore(dir, true, 1*time.Second, true,
 			true, log, metrics, nil)

 		err = test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))

@@ -304,7 +304,7 @@ func TestRunScrubRepo(t *testing.T) {
 		dir := t.TempDir()
 		log := log.NewLogger("debug", logFile.Name())
 		metrics := monitoring.NewMetricsServer(false, log)
-		imgStore := storage.NewImageStore(dir, true, 1*time.Second,
+		imgStore := local.NewImageStore(dir, true, 1*time.Second,
 			true, true, log, metrics, nil)

 		err = test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))

@@ -36,6 +36,7 @@ import (
 	"zotregistry.io/zot/pkg/extensions/search/common"
 	"zotregistry.io/zot/pkg/log"
 	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	. "zotregistry.io/zot/pkg/test"
 	"zotregistry.io/zot/pkg/test/mocks"
 )

@@ -1025,10 +1026,10 @@ func TestUtilsMethod(t *testing.T) {
 		conf.Extensions.Lint = &extconf.LintConfig{}

 		metrics := monitoring.NewMetricsServer(false, log)
-		defaultStore := storage.NewImageStore(rootDir, false,
+		defaultStore := local.NewImageStore(rootDir, false,
 			storage.DefaultGCDelay, false, false, log, metrics, nil)

-		subStore := storage.NewImageStore(subRootDir, false,
+		subStore := local.NewImageStore(subRootDir, false,
 			storage.DefaultGCDelay, false, false, log, metrics, nil)

 		subStoreMap := make(map[string]storage.ImageStore)

@@ -32,6 +32,7 @@ import (
 	"zotregistry.io/zot/pkg/extensions/search/cve/trivy"
 	"zotregistry.io/zot/pkg/log"
 	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	. "zotregistry.io/zot/pkg/test"
 	"zotregistry.io/zot/pkg/test/mocks"
 )

@@ -86,7 +87,7 @@ func testSetup() error {
 	conf.Extensions = &extconf.ExtensionConfig{}
 	conf.Extensions.Lint = &extconf.LintConfig{}

-	storeController := storage.StoreController{DefaultStore: storage.NewImageStore(dir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)}
+	storeController := storage.StoreController{DefaultStore: local.NewImageStore(dir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)}

 	layoutUtils := common.NewBaseOciLayoutUtils(storeController, log)
 	scanner := trivy.NewScanner(storeController, layoutUtils, log)

@@ -329,7 +330,7 @@ func TestImageFormat(t *testing.T) {
 		conf.Extensions.Lint = &extconf.LintConfig{}

 		metrics := monitoring.NewMetricsServer(false, log)
-		defaultStore := storage.NewImageStore(dbDir, false, storage.DefaultGCDelay,
+		defaultStore := local.NewImageStore(dbDir, false, storage.DefaultGCDelay,
 			false, false, log, metrics, nil)
 		storeController := storage.StoreController{DefaultStore: defaultStore}

@@ -16,6 +16,7 @@ import (
 	"zotregistry.io/zot/pkg/extensions/search/common"
 	"zotregistry.io/zot/pkg/log"
 	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	"zotregistry.io/zot/pkg/test"
 )

@@ -64,11 +65,11 @@ func TestMultipleStoragePath(t *testing.T) {
 		conf.Extensions.Lint = &extconf.LintConfig{}

 		// Create ImageStore
-		firstStore := storage.NewImageStore(firstRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)
+		firstStore := local.NewImageStore(firstRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)

-		secondStore := storage.NewImageStore(secondRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)
+		secondStore := local.NewImageStore(secondRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)

-		thirdStore := storage.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)
+		thirdStore := local.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)

 		storeController := storage.StoreController{}

@@ -22,6 +22,7 @@ import (
 	digestinfo "zotregistry.io/zot/pkg/extensions/search/digest"
 	"zotregistry.io/zot/pkg/log"
 	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	. "zotregistry.io/zot/pkg/test"
 )

@@ -102,7 +103,7 @@ func testSetup() error {
 	log := log.NewLogger("debug", "")
 	metrics := monitoring.NewMetricsServer(false, log)
 	storeController := storage.StoreController{
-		DefaultStore: storage.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil),
+		DefaultStore: local.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil),
 	}

 	digestInfo = digestinfo.NewDigestInfo(storeController, log)

@@ -115,7 +116,7 @@ func TestDigestInfo(t *testing.T) {
 		log := log.NewLogger("debug", "")
 		metrics := monitoring.NewMetricsServer(false, log)
 		storeController := storage.StoreController{
-			DefaultStore: storage.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil),
+			DefaultStore: local.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil),
 		}

 		digestInfo = digestinfo.NewDigestInfo(storeController, log)

@@ -18,7 +18,7 @@ import (
 	"zotregistry.io/zot/pkg/extensions/search/common"
 	"zotregistry.io/zot/pkg/log"
 	localCtx "zotregistry.io/zot/pkg/requestcontext"
-	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	"zotregistry.io/zot/pkg/test/mocks"
 )

@@ -266,7 +266,7 @@ func TestUserAvailableRepos(t *testing.T) {
 	log := log.Logger{Logger: zerolog.New(os.Stdout)}
 	dir := t.TempDir()
 	metrics := monitoring.NewMetricsServer(false, log)
-	defaultStore := storage.NewImageStore(dir, false, 0, false, false, log, metrics, nil)
+	defaultStore := local.NewImageStore(dir, false, 0, false, false, log, metrics, nil)

 	repoList, err := defaultStore.GetRepositories()
 	So(err, ShouldBeNil)

@@ -24,6 +24,7 @@ import (
 	"zotregistry.io/zot/pkg/extensions/monitoring"
 	"zotregistry.io/zot/pkg/log"
 	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	"zotregistry.io/zot/pkg/test"
 )

@@ -60,7 +61,7 @@ func TestInjectSyncUtils(t *testing.T) {

 	log := log.Logger{Logger: zerolog.New(os.Stdout)}
 	metrics := monitoring.NewMetricsServer(false, log)
-	imageStore := storage.NewImageStore(t.TempDir(), false, storage.DefaultGCDelay,
+	imageStore := local.NewImageStore(t.TempDir(), false, storage.DefaultGCDelay,
 		false, false, log, metrics, nil,
 	)
 	injected = test.InjectFailure(0)

@@ -159,7 +160,7 @@ func TestSyncInternal(t *testing.T) {
 		log := log.Logger{Logger: zerolog.New(os.Stdout)}
 		metrics := monitoring.NewMetricsServer(false, log)

-		imageStore := storage.NewImageStore(t.TempDir(), false, storage.DefaultGCDelay,
+		imageStore := local.NewImageStore(t.TempDir(), false, storage.DefaultGCDelay,
 			false, false, log, metrics, nil)

 		err := os.Chmod(imageStore.RootDir(), 0o000)

@@ -319,15 +320,15 @@ func TestSyncInternal(t *testing.T) {
 			Layers: []ispec.Descriptor{desc},
 		}

-		err = syncCosignSignature(client, &storage.ImageStoreLocal{}, *regURL, testImage, testImage,
+		err = syncCosignSignature(client, &local.ImageStoreLocal{}, *regURL, testImage, testImage,
 			testImageTag, &ispec.Manifest{}, log)
 		So(err, ShouldNotBeNil)

-		err = syncCosignSignature(client, &storage.ImageStoreLocal{}, *regURL, testImage, testImage,
+		err = syncCosignSignature(client, &local.ImageStoreLocal{}, *regURL, testImage, testImage,
 			testImageTag, &manifest, log)
 		So(err, ShouldNotBeNil)

-		err = syncNotarySignature(client, &storage.ImageStoreLocal{}, *regURL, testImage, testImage,
+		err = syncNotarySignature(client, &local.ImageStoreLocal{}, *regURL, testImage, testImage,
 			"invalidDigest", ReferenceList{[]artifactspec.Descriptor{ref}}, log)
 		So(err, ShouldNotBeNil)
 	})

@@ -343,7 +344,7 @@ func TestSyncInternal(t *testing.T) {
 		log := log.Logger{Logger: zerolog.New(os.Stdout)}
 		metrics := monitoring.NewMetricsServer(false, log)

-		imageStore := storage.NewImageStore(storageDir, false, storage.DefaultGCDelay,
+		imageStore := local.NewImageStore(storageDir, false, storage.DefaultGCDelay,
 			false, false, log, metrics, nil)

 		refs := ReferenceList{[]artifactspec.Descriptor{

@@ -426,7 +427,7 @@ func TestSyncInternal(t *testing.T) {
 		log := log.Logger{Logger: zerolog.New(os.Stdout)}
 		metrics := monitoring.NewMetricsServer(false, log)

-		imageStore := storage.NewImageStore(storageDir, false, storage.DefaultGCDelay,
+		imageStore := local.NewImageStore(storageDir, false, storage.DefaultGCDelay,
 			false, false, log, metrics, nil)

 		storeController := storage.StoreController{}

@@ -448,7 +449,7 @@ func TestSyncInternal(t *testing.T) {
 			panic(err)
 		}

-		testImageStore := storage.NewImageStore(testRootDir, false,
+		testImageStore := local.NewImageStore(testRootDir, false,
 			storage.DefaultGCDelay, false, false, log, metrics, nil)
 		manifestContent, _, _, err := testImageStore.GetImageManifest(testImage, testImageTag)
 		So(err, ShouldBeNil)

@@ -26,6 +26,7 @@ import (
 	"zotregistry.io/zot/pkg/extensions/monitoring"
 	"zotregistry.io/zot/pkg/log"
 	"zotregistry.io/zot/pkg/storage"
+	"zotregistry.io/zot/pkg/storage/local"
 	"zotregistry.io/zot/pkg/test"
 )

@@ -270,7 +271,7 @@ func pushSyncedLocalImage(localRepo, tag, localCachePath string,

 	metrics := monitoring.NewMetricsServer(false, log)

-	cacheImageStore := storage.NewImageStore(localCachePath, false,
+	cacheImageStore := local.NewImageStore(localCachePath, false,
 		storage.DefaultGCDelay, false, false, log, metrics, nil)

 	manifestContent, _, _, err := cacheImageStore.GetImageManifest(localRepo, tag)

@@ -404,7 +405,7 @@ func getLocalCachePath(imageStore storage.ImageStore, repo string) (string, erro
 	// check if SyncBlobUploadDir exists, create if not
 	var err error
 	if _, err = os.ReadDir(localRepoPath); os.IsNotExist(err) {
-		if err = os.MkdirAll(localRepoPath, storage.DefaultDirPerms); err != nil {
+		if err = os.MkdirAll(localRepoPath, local.DefaultDirPerms); err != nil {
 			return "", err
 		}
 	}

@@ -423,7 +424,7 @@ func getLocalCachePath(imageStore storage.ImageStore, repo string) (string, erro
 	localCachePath := path.Join(localRepoPath, uuid.String())

 	cachedRepoPath := path.Join(localCachePath, repo)
-	if err = os.MkdirAll(cachedRepoPath, storage.DefaultDirPerms); err != nil {
+	if err = os.MkdirAll(cachedRepoPath, local.DefaultDirPerms); err != nil {
 		return "", err
 	}

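The permission constants used above also moved: callers now reference local.DefaultFilePerms and local.DefaultDirPerms, which the local package below defines as 0o600 and 0o700. A minimal sketch of creating a cache directory with the relocated constant; the directory path here is a placeholder, not a path from this commit.

	// Placeholder path; the point is the constant's new home in pkg/storage/local.
	cacheDir := path.Join(os.TempDir(), "zot-cache")
	if err := os.MkdirAll(cacheDir, local.DefaultDirPerms); err != nil {
		return "", err
	}
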
pkg/storage/common.go (new file, 427 lines)
@@ -0,0 +1,427 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/notaryproject/notation-go"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sigstore/cosign/pkg/oci/remote"
|
||||
zerr "zotregistry.io/zot/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlobUploadDir defines the upload directory for blob uploads.
|
||||
BlobUploadDir = ".uploads"
|
||||
SchemaVersion = 2
|
||||
RLOCK = "RLock"
|
||||
RWLOCK = "RWLock"
|
||||
)
|
||||
|
||||
func GetTagsByIndex(index ispec.Index) []string {
|
||||
tags := make([]string, 0)
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
tags = append(tags, v)
|
||||
}
|
||||
}
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
func GetManifestDescByReference(index ispec.Index, reference string) (ispec.Descriptor, bool) {
|
||||
var manifestDesc ispec.Descriptor
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
if reference == manifest.Digest.String() {
|
||||
return manifest, true
|
||||
}
|
||||
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok && v == reference {
|
||||
return manifest, true
|
||||
}
|
||||
}
|
||||
|
||||
return manifestDesc, false
|
||||
}
|
||||
|
||||
func ValidateManifest(imgStore ImageStore, repo, reference, mediaType string, body []byte,
|
||||
log zerolog.Logger,
|
||||
) (string, error) {
|
||||
// validate the manifest
|
||||
if !IsSupportedMediaType(mediaType) {
|
||||
log.Debug().Interface("actual", mediaType).
|
||||
Msg("bad manifest media type")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
if len(body) == 0 {
|
||||
log.Debug().Int("len", len(body)).Msg("invalid body length")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
if mediaType == ispec.MediaTypeImageManifest {
|
||||
var manifest ispec.Manifest
|
||||
if err := json.Unmarshal(body, &manifest); err != nil {
|
||||
log.Error().Err(err).Msg("unable to unmarshal JSON")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
if manifest.Config.MediaType == ispec.MediaTypeImageConfig {
|
||||
digest, err := validateOCIManifest(imgStore, repo, reference, &manifest, log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("invalid oci image manifest")
|
||||
|
||||
return digest, err
|
||||
}
|
||||
}
|
||||
} else if mediaType == artifactspec.MediaTypeArtifactManifest {
|
||||
var m notation.Descriptor
|
||||
if err := json.Unmarshal(body, &m); err != nil {
|
||||
log.Error().Err(err).Msg("unable to unmarshal JSON")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func validateOCIManifest(imgStore ImageStore, repo, reference string, manifest *ispec.Manifest,
|
||||
log zerolog.Logger,
|
||||
) (string, error) {
|
||||
if manifest.SchemaVersion != SchemaVersion {
|
||||
log.Error().Int("SchemaVersion", manifest.SchemaVersion).Msg("invalid manifest")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
// validate image config
|
||||
config := manifest.Config
|
||||
|
||||
blobFile, _, err := imgStore.GetBlob(repo, config.Digest.String(), "")
|
||||
if err != nil {
|
||||
return config.Digest.String(), zerr.ErrBlobNotFound
|
||||
}
|
||||
|
||||
defer blobFile.Close()
|
||||
|
||||
dec := json.NewDecoder(blobFile)
|
||||
|
||||
var cspec ispec.Image
|
||||
if err := dec.Decode(&cspec); err != nil {
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
// validate the layers
|
||||
for _, l := range manifest.Layers {
|
||||
blobFile, _, err := imgStore.GetBlob(repo, l.Digest.String(), "")
|
||||
if err != nil {
|
||||
return l.Digest.String(), zerr.ErrBlobNotFound
|
||||
}
|
||||
|
||||
defer blobFile.Close()
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func GetAndValidateRequestDigest(body []byte, digest string, log zerolog.Logger) (godigest.Digest, error) {
|
||||
bodyDigest := godigest.FromBytes(body)
|
||||
|
||||
d, err := godigest.Parse(digest)
|
||||
if err == nil {
|
||||
if d.String() != bodyDigest.String() {
|
||||
log.Error().Str("actual", bodyDigest.String()).Str("expected", d.String()).
|
||||
Msg("manifest digest is not valid")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
}
|
||||
|
||||
return bodyDigest, err
|
||||
}
|
||||
|
||||
/* CheckIfIndexNeedsUpdate verifies if an index needs to be updated given a new manifest descriptor.
|
||||
Returns whether or not the index needs an update; if it does, it also returns the previous digest of the overwritten tag. */
|
||||
func CheckIfIndexNeedsUpdate(index *ispec.Index, desc *ispec.Descriptor,
|
||||
log zerolog.Logger,
|
||||
) (bool, godigest.Digest, error) {
|
||||
var oldDgst godigest.Digest
|
||||
|
||||
var reference string
|
||||
|
||||
tag, ok := desc.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
reference = tag
|
||||
} else {
|
||||
reference = desc.Digest.String()
|
||||
}
|
||||
|
||||
updateIndex := true
|
||||
|
||||
for midx, manifest := range index.Manifests {
|
||||
manifest := manifest
|
||||
if reference == manifest.Digest.String() {
|
||||
// nothing changed, so don't update
|
||||
desc = &manifest
|
||||
updateIndex = false
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok && v == reference {
|
||||
if manifest.Digest.String() == desc.Digest.String() {
|
||||
// nothing changed, so don't update
|
||||
desc = &manifest
|
||||
updateIndex = false
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// manifest contents have changed for the same tag,
|
||||
// so update index.json descriptor
|
||||
log.Info().
|
||||
Int64("old size", manifest.Size).
|
||||
Int64("new size", desc.Size).
|
||||
Str("old digest", manifest.Digest.String()).
|
||||
Str("new digest", desc.Digest.String()).
|
||||
Str("old mediaType", manifest.MediaType).
|
||||
Str("new mediaType", desc.MediaType).
|
||||
Msg("updating existing tag with new manifest contents")
|
||||
|
||||
// changing media-type is disallowed!
|
||||
if manifest.MediaType != desc.MediaType {
|
||||
err := zerr.ErrBadManifest
|
||||
log.Error().Err(err).
|
||||
Str("old mediaType", manifest.MediaType).
|
||||
Str("new mediaType", desc.MediaType).Msg("cannot change media-type")
|
||||
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
oldDesc := *desc
|
||||
|
||||
desc = &manifest
|
||||
oldDgst = manifest.Digest
|
||||
desc.Size = oldDesc.Size
|
||||
desc.Digest = oldDesc.Digest
|
||||
|
||||
index.Manifests = append(index.Manifests[:midx], index.Manifests[midx+1:]...)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return updateIndex, oldDgst, nil
|
||||
}
|
||||
|
||||
func GetIndex(imgStore ImageStore, repo string, log zerolog.Logger) (ispec.Index, error) {
|
||||
var index ispec.Index
|
||||
|
||||
buf, err := imgStore.GetIndexContent(repo)
|
||||
if err != nil {
|
||||
return index, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
log.Error().Err(err).Str("dir", path.Join(imgStore.RootDir(), repo)).Msg("invalid JSON")
|
||||
|
||||
return index, zerr.ErrRepoBadVersion
|
||||
}
|
||||
|
||||
return index, nil
|
||||
}
|
||||
|
||||
func RemoveManifestDescByReference(index *ispec.Index, reference string) (ispec.Descriptor, bool) {
|
||||
var removedManifest ispec.Descriptor
|
||||
|
||||
var found bool
|
||||
|
||||
var outIndex ispec.Index
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
tag, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok && tag == reference {
|
||||
removedManifest = manifest
|
||||
found = true
|
||||
|
||||
continue
|
||||
} else if reference == manifest.Digest.String() {
|
||||
removedManifest = manifest
|
||||
found = true
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
outIndex.Manifests = append(outIndex.Manifests, manifest)
|
||||
}
|
||||
|
||||
index.Manifests = outIndex.Manifests
|
||||
|
||||
return removedManifest, found
|
||||
}
|
||||
|
||||
/* additionally, unmarshal an image index and for all manifests in that
|
||||
index, ensure that they do not have a name or they are not in other
|
||||
manifest indexes else GC can never clean them. */
|
||||
func UpdateIndexWithPrunedImageManifests(imgStore ImageStore, index *ispec.Index, repo string,
|
||||
desc ispec.Descriptor, oldDgst godigest.Digest, log zerolog.Logger,
|
||||
) error {
|
||||
if (desc.MediaType == ispec.MediaTypeImageIndex) && (oldDgst != "") {
|
||||
otherImgIndexes := []ispec.Descriptor{}
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
if manifest.MediaType == ispec.MediaTypeImageIndex {
|
||||
otherImgIndexes = append(otherImgIndexes, manifest)
|
||||
}
|
||||
}
|
||||
|
||||
otherImgIndexes = append(otherImgIndexes, desc)
|
||||
|
||||
prunedManifests, err := PruneImageManifestsFromIndex(imgStore, repo, oldDgst, *index, otherImgIndexes, log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
index.Manifests = prunedManifests
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
before an image index manifest is pushed to a repo, its constituent manifests
|
||||
are pushed first, so when updating/removing this image index manifest, we also
|
||||
need to determine if there are other image index manifests which refer to the
|
||||
same constituent manifests so that they can be garbage-collected correctly
|
||||
|
||||
PruneImageManifestsFromIndex is a helper routine to achieve this.
|
||||
*/
|
||||
func PruneImageManifestsFromIndex(imgStore ImageStore, repo string, digest godigest.Digest, // nolint: gocyclo
|
||||
outIndex ispec.Index, otherImgIndexes []ispec.Descriptor, log zerolog.Logger,
|
||||
) ([]ispec.Descriptor, error) {
|
||||
dir := path.Join(imgStore.RootDir(), repo)
|
||||
|
||||
indexPath := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
|
||||
|
||||
buf, err := imgStore.GetBlobContent(repo, digest.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var imgIndex ispec.Index
|
||||
if err := json.Unmarshal(buf, &imgIndex); err != nil {
|
||||
log.Error().Err(err).Str("path", indexPath).Msg("invalid JSON")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inUse := map[string]uint{}
|
||||
|
||||
for _, manifest := range imgIndex.Manifests {
|
||||
inUse[manifest.Digest.Encoded()]++
|
||||
}
|
||||
|
||||
for _, otherIndex := range otherImgIndexes {
|
||||
buf, err := imgStore.GetBlobContent(repo, otherIndex.Digest.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indexPath := path.Join(imgStore.RootDir(), repo, "blobs",
|
||||
otherIndex.Digest.Algorithm().String(), otherIndex.Digest.Encoded())
|
||||
|
||||
var oindex ispec.Index
|
||||
if err := json.Unmarshal(buf, &oindex); err != nil {
|
||||
log.Error().Err(err).Str("path", indexPath).Msg("invalid JSON")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, omanifest := range oindex.Manifests {
|
||||
_, ok := inUse[omanifest.Digest.Encoded()]
|
||||
if ok {
|
||||
inUse[omanifest.Digest.Encoded()]++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prunedManifests := []ispec.Descriptor{}
|
||||
|
||||
// for all manifests in the index, skip those that either have a tag or
|
||||
// are used in other imgIndexes
|
||||
for _, outManifest := range outIndex.Manifests {
|
||||
if outManifest.MediaType != ispec.MediaTypeImageManifest {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
_, ok := outManifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
count, ok := inUse[outManifest.Digest.Encoded()]
|
||||
if !ok {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
// this manifest is in use in other image indexes
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return prunedManifests, nil
|
||||
}
|
||||
|
||||
func ApplyLinter(imgStore ImageStore, linter Lint, repo string, manifestDesc ispec.Descriptor) (bool, error) {
|
||||
pass := true
|
||||
|
||||
if linter != nil {
|
||||
tag := manifestDesc.Annotations[ispec.AnnotationRefName]
|
||||
// apply linter only on images, not signatures
|
||||
if manifestDesc.MediaType == ispec.MediaTypeImageManifest &&
|
||||
// check that image manifest is not cosign signature
|
||||
!strings.HasPrefix(tag, "sha256-") &&
|
||||
!strings.HasSuffix(tag, remote.SignatureTagSuffix) {
|
||||
// lint new index with new manifest before writing to disk
|
||||
pass, err := linter.Lint(repo, manifestDesc.Digest, imgStore)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !pass {
|
||||
return false, zerr.ErrImageLintAnnotations
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return pass, nil
|
||||
}
|
||||
|
||||
func IsSupportedMediaType(mediaType string) bool {
|
||||
return mediaType == ispec.MediaTypeImageIndex ||
|
||||
mediaType == ispec.MediaTypeImageManifest ||
|
||||
mediaType == artifactspec.MediaTypeArtifactManifest
|
||||
}
|
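These helpers are meant to be shared by the storage backends; for example, the local store's GetImageManifest (rewritten later in this diff) resolves a reference through GetIndex and GetManifestDescByReference instead of parsing index.json by hand. A rough usage sketch follows, with the repository name, tag, and surrounding error handling as assumptions rather than code from this commit.

	// Sketch: resolve a tag or digest to its manifest blob via the shared helpers.
	index, err := storage.GetIndex(imgStore, "myrepo", log)
	if err != nil {
		return nil, err
	}

	desc, found := storage.GetManifestDescByReference(index, "v1.0")
	if !found {
		return nil, zerr.ErrManifestNotFound
	}

	buf, err := imgStore.GetBlobContent("myrepo", desc.Digest.String())
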
pkg/storage/common_test.go (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
package storage_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
"zotregistry.io/zot/pkg/log"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/storage/local"
|
||||
"zotregistry.io/zot/pkg/test"
|
||||
)
|
||||
|
||||
func TestValidateManifest(t *testing.T) {
|
||||
Convey("Make manifest", t, func(c C) {
|
||||
dir := t.TempDir()
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
true, log, metrics, nil)
|
||||
|
||||
content := []byte("this is a blob")
|
||||
digest := godigest.FromBytes(content)
|
||||
So(digest, ShouldNotBeNil)
|
||||
|
||||
_, blen, err := imgStore.FullBlobUpload("test", bytes.NewReader(content), digest.String())
|
||||
So(err, ShouldBeNil)
|
||||
So(blen, ShouldEqual, len(content))
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest.String())
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
|
||||
Convey("bad manifest schema version", func() {
|
||||
manifest := ispec.Manifest{
|
||||
Config: ispec.Descriptor{
|
||||
MediaType: ispec.MediaTypeImageConfig,
|
||||
Digest: cdigest,
|
||||
Size: int64(len(cblob)),
|
||||
},
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: ispec.MediaTypeImageLayer,
|
||||
Digest: digest,
|
||||
Size: int64(len(content)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
manifest.SchemaVersion = 999
|
||||
|
||||
body, err := json.Marshal(manifest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, body)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("bad config blob", func() {
|
||||
manifest := ispec.Manifest{
|
||||
Config: ispec.Descriptor{
|
||||
MediaType: ispec.MediaTypeImageConfig,
|
||||
Digest: cdigest,
|
||||
Size: int64(len(cblob)),
|
||||
},
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: ispec.MediaTypeImageLayer,
|
||||
Digest: digest,
|
||||
Size: int64(len(content)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
manifest.SchemaVersion = 2
|
||||
|
||||
configBlobPath := imgStore.BlobPath("test", cdigest)
|
||||
|
||||
err := os.WriteFile(configBlobPath, []byte("bad config blob"), 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
body, err := json.Marshal(manifest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, body)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
package storage
|
||||
package local
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -11,7 +11,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
@ -20,55 +19,38 @@ import (
|
||||
apexlog "github.com/apex/log"
|
||||
guuid "github.com/gofrs/uuid"
|
||||
"github.com/minio/sha256-simd"
|
||||
"github.com/notaryproject/notation-go"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/umoci"
|
||||
"github.com/opencontainers/umoci/oci/casext"
|
||||
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sigstore/cosign/pkg/oci/remote"
|
||||
zerr "zotregistry.io/zot/errors"
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
zlog "zotregistry.io/zot/pkg/log"
|
||||
"zotregistry.io/zot/pkg/scheduler"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/test"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlobUploadDir defines the upload directory for blob uploads.
|
||||
BlobUploadDir = ".uploads"
|
||||
SchemaVersion = 2
|
||||
DefaultFilePerms = 0o600
|
||||
DefaultDirPerms = 0o700
|
||||
RLOCK = "RLock"
|
||||
RWLOCK = "RWLock"
|
||||
)
|
||||
|
||||
// BlobUpload models an upload request.
|
||||
type BlobUpload struct {
|
||||
StoreName string
|
||||
ID string
|
||||
}
|
||||
|
||||
type StoreController struct {
|
||||
DefaultStore ImageStore
|
||||
SubStore map[string]ImageStore
|
||||
}
|
||||
|
||||
// ImageStoreLocal provides the image storage operations.
|
||||
type ImageStoreLocal struct {
|
||||
rootDir string
|
||||
lock *sync.RWMutex
|
||||
blobUploads map[string]BlobUpload
|
||||
cache *Cache
|
||||
blobUploads map[string]storage.BlobUpload
|
||||
cache *storage.Cache
|
||||
gc bool
|
||||
dedupe bool
|
||||
commit bool
|
||||
gcDelay time.Duration
|
||||
log zerolog.Logger
|
||||
metrics monitoring.MetricServer
|
||||
linter Lint
|
||||
linter storage.Lint
|
||||
}
|
||||
|
||||
func (is *ImageStoreLocal) RootDir() string {
|
||||
@ -79,39 +61,10 @@ func (is *ImageStoreLocal) DirExists(d string) bool {
|
||||
return DirExists(d)
|
||||
}
|
||||
|
||||
func getRoutePrefix(name string) string {
|
||||
names := strings.SplitN(name, "/", 2) //nolint:gomnd
|
||||
|
||||
if len(names) != 2 { //nolint:gomnd
|
||||
// it means route is of global storage e.g "centos:latest"
|
||||
if len(names) == 1 {
|
||||
return "/"
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("/%s", names[0])
|
||||
}
|
||||
|
||||
func (sc StoreController) GetImageStore(name string) ImageStore {
|
||||
if sc.SubStore != nil {
|
||||
// SubStore is being provided, now we need to find equivalent image store and this will be found by splitting name
|
||||
prefixName := getRoutePrefix(name)
|
||||
|
||||
imgStore, ok := sc.SubStore[prefixName]
|
||||
if !ok {
|
||||
imgStore = sc.DefaultStore
|
||||
}
|
||||
|
||||
return imgStore
|
||||
}
|
||||
|
||||
return sc.DefaultStore
|
||||
}
|
||||
|
||||
// NewImageStore returns a new image store backed by a file storage.
|
||||
func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commit bool,
|
||||
log zlog.Logger, metrics monitoring.MetricServer, linter Lint,
|
||||
) ImageStore {
|
||||
log zlog.Logger, metrics monitoring.MetricServer, linter storage.Lint,
|
||||
) storage.ImageStore {
|
||||
if _, err := os.Stat(rootDir); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(rootDir, DefaultDirPerms); err != nil {
|
||||
log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir")
|
||||
@ -123,7 +76,7 @@ func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commi
|
||||
imgStore := &ImageStoreLocal{
|
||||
rootDir: rootDir,
|
||||
lock: &sync.RWMutex{},
|
||||
blobUploads: make(map[string]BlobUpload),
|
||||
blobUploads: make(map[string]storage.BlobUpload),
|
||||
gc: gc,
|
||||
gcDelay: gcDelay,
|
||||
dedupe: dedupe,
|
||||
@ -134,7 +87,7 @@ func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commi
|
||||
}
|
||||
|
||||
if dedupe {
|
||||
imgStore.cache = NewCache(rootDir, "cache", true, log)
|
||||
imgStore.cache = storage.NewCache(rootDir, "cache", true, log)
|
||||
}
|
||||
|
||||
if gc {
|
||||
@ -168,7 +121,7 @@ func (is *ImageStoreLocal) RUnlock(lockStart *time.Time) {
|
||||
|
||||
lockEnd := time.Now()
|
||||
latency := lockEnd.Sub(*lockStart)
|
||||
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), RLOCK) // histogram
|
||||
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storage.RLOCK) // histogram
|
||||
}
|
||||
|
||||
// Lock write-lock.
|
||||
@ -184,7 +137,7 @@ func (is *ImageStoreLocal) Unlock(lockStart *time.Time) {
|
||||
|
||||
lockEnd := time.Now()
|
||||
latency := lockEnd.Sub(*lockStart)
|
||||
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), RWLOCK) // histogram
|
||||
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storage.RWLOCK) // histogram
|
||||
}
|
||||
|
||||
func (is *ImageStoreLocal) initRepo(name string) error {
|
||||
@ -204,7 +157,7 @@ func (is *ImageStoreLocal) initRepo(name string) error {
|
||||
return err
|
||||
}
|
||||
// create BlobUploadDir subdir
|
||||
err = ensureDir(path.Join(repoDir, BlobUploadDir), is.log)
|
||||
err = ensureDir(path.Join(repoDir, storage.BlobUploadDir), is.log)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("error creating blob upload subdir")
|
||||
|
||||
@ -295,7 +248,7 @@ func (is *ImageStoreLocal) ValidateRepo(name string) (bool, error) {
|
||||
}
|
||||
|
||||
for k, v := range found {
|
||||
if !v && k != BlobUploadDir {
|
||||
if !v && k != storage.BlobUploadDir {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
@ -422,109 +375,39 @@ func (is *ImageStoreLocal) GetNextRepository(repo string) (string, error) {
|
||||
|
||||
// GetImageTags returns a list of image tags available in the specified repository.
|
||||
func (is *ImageStoreLocal) GetImageTags(repo string) ([]string, error) {
|
||||
var lockLatency time.Time
|
||||
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
if !is.DirExists(dir) {
|
||||
return nil, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
is.RLock(&lockLatency)
|
||||
defer is.RUnlock(&lockLatency)
|
||||
|
||||
buf, err := os.ReadFile(path.Join(dir, "index.json"))
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
return nil, zerr.ErrRepoNotFound
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var index ispec.Index
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
|
||||
|
||||
return nil, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
tags := make([]string, 0)
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
tags = append(tags, v)
|
||||
}
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
return storage.GetTagsByIndex(index), nil
|
||||
}
|
||||
|
||||
// GetImageManifest returns the image manifest of an image in the specific repository.
|
||||
func (is *ImageStoreLocal) GetImageManifest(repo, reference string) ([]byte, string, string, error) {
|
||||
var lockLatency time.Time
|
||||
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
if !is.DirExists(dir) {
|
||||
return nil, "", "", zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
is.RLock(&lockLatency)
|
||||
defer is.RUnlock(&lockLatency)
|
||||
|
||||
buf, err := os.ReadFile(path.Join(dir, "index.json"))
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
return nil, "", "", zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
var index ispec.Index
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
|
||||
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
found := false
|
||||
|
||||
var digest godigest.Digest
|
||||
|
||||
mediaType := ""
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
if reference == manifest.Digest.String() {
|
||||
digest = manifest.Digest
|
||||
mediaType = manifest.MediaType
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok && v == reference {
|
||||
digest = manifest.Digest
|
||||
mediaType = manifest.MediaType
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
manifestDesc, found := storage.GetManifestDescByReference(index, reference)
|
||||
if !found {
|
||||
return nil, "", "", zerr.ErrManifestNotFound
|
||||
}
|
||||
|
||||
p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
|
||||
|
||||
buf, err = os.ReadFile(p)
|
||||
|
||||
buf, err := is.GetBlobContent(repo, manifestDesc.Digest.String())
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("blob", p).Msg("failed to read manifest")
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
if errors.Is(err, zerr.ErrBlobNotFound) {
|
||||
return nil, "", "", zerr.ErrManifestNotFound
|
||||
}
|
||||
|
||||
@ -540,156 +423,7 @@ func (is *ImageStoreLocal) GetImageManifest(repo, reference string) ([]byte, str
|
||||
|
||||
monitoring.IncDownloadCounter(is.metrics, repo)
|
||||
|
||||
return buf, digest.String(), mediaType, nil
|
||||
}
|
||||
|
||||
func (is *ImageStoreLocal) validateOCIManifest(repo, reference string, manifest *ispec.Manifest) (string, error) {
|
||||
if manifest.SchemaVersion != SchemaVersion {
|
||||
is.log.Error().Int("SchemaVersion", manifest.SchemaVersion).Msg("invalid manifest")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
// validate image config
|
||||
config := manifest.Config
|
||||
if config.MediaType != ispec.MediaTypeImageConfig {
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
digest := config.Digest
|
||||
|
||||
blobPath := is.BlobPath(repo, digest)
|
||||
if _, err := os.Stat(blobPath); err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to find blob")
|
||||
|
||||
return digest.String(), zerr.ErrBlobNotFound
|
||||
}
|
||||
|
||||
blobFile, err := os.Open(blobPath)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to find blob")
|
||||
|
||||
return digest.String(), zerr.ErrBlobNotFound
|
||||
}
|
||||
|
||||
defer blobFile.Close()
|
||||
|
||||
dec := json.NewDecoder(blobFile)
|
||||
|
||||
var cspec ispec.Image
|
||||
if err := dec.Decode(&cspec); err != nil {
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
// validate the layers
|
||||
for _, l := range manifest.Layers {
|
||||
digest = l.Digest
|
||||
blobPath = is.BlobPath(repo, digest)
|
||||
is.log.Info().Str("blobPath", blobPath).Str("reference", reference).Msg("manifest layers")
|
||||
|
||||
if _, err := os.Stat(blobPath); err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to find blob")
|
||||
|
||||
return digest.String(), zerr.ErrBlobNotFound
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
before an image index manifest is pushed to a repo, its constituent manifests
|
||||
are pushed first, so when updating/removing this image index manifest, we also
|
||||
need to determine if there are other image index manifests which refer to the
|
||||
same constituent manifests so that they can be garbage-collected correctly
|
||||
|
||||
pruneImageManifestsFromIndex is a helper routine to achieve this.
|
||||
*/
|
||||
func pruneImageManifestsFromIndex(dir string, digest godigest.Digest, // nolint: gocyclo
|
||||
outIndex ispec.Index, otherImgIndexes []ispec.Descriptor, log zerolog.Logger,
|
||||
) ([]ispec.Descriptor, error) {
|
||||
indexPath := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
|
||||
|
||||
buf, err := os.ReadFile(indexPath)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var imgIndex ispec.Index
|
||||
if err := json.Unmarshal(buf, &imgIndex); err != nil {
|
||||
log.Error().Err(err).Str("path", indexPath).Msg("invalid JSON")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inUse := map[string]uint{}
|
||||
|
||||
for _, manifest := range imgIndex.Manifests {
|
||||
inUse[manifest.Digest.Encoded()]++
|
||||
}
|
||||
|
||||
for _, otherIndex := range otherImgIndexes {
|
||||
indexPath := path.Join(dir, "blobs", otherIndex.Digest.Algorithm().String(), otherIndex.Digest.Encoded())
|
||||
|
||||
buf, err := os.ReadFile(indexPath)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var oindex ispec.Index
|
||||
if err := json.Unmarshal(buf, &oindex); err != nil {
|
||||
log.Error().Err(err).Str("path", indexPath).Msg("invalid JSON")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, omanifest := range oindex.Manifests {
|
||||
_, ok := inUse[omanifest.Digest.Encoded()]
|
||||
if ok {
|
||||
inUse[omanifest.Digest.Encoded()]++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prunedManifests := []ispec.Descriptor{}
|
||||
|
||||
// for all manifests in the index, skip those that either have a tag or
|
||||
// are used in other imgIndexes
|
||||
for _, outManifest := range outIndex.Manifests {
|
||||
if outManifest.MediaType != ispec.MediaTypeImageManifest {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
_, ok := outManifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
count, ok := inUse[outManifest.Digest.Encoded()]
|
||||
if !ok {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
// this manifest is in use in other image indexes
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return prunedManifests, nil
|
||||
return buf, manifestDesc.Digest.String(), manifestDesc.MediaType, nil
|
||||
}
|
||||
|
||||
// PutImageManifest adds an image manifest to the repository.
|
||||
@ -702,38 +436,43 @@ func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, /
|
||||
return "", err
|
||||
}
|
||||
|
||||
// validate the manifest
|
||||
if !IsSupportedMediaType(mediaType) {
|
||||
is.log.Debug().Interface("actual", mediaType).
|
||||
Msg("bad manifest media type")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
if len(body) == 0 {
|
||||
is.log.Debug().Int("len", len(body)).Msg("invalid body length")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
dig, err := validateManifest(is, repo, reference, mediaType, body)
|
||||
dig, err := storage.ValidateManifest(is, repo, reference, mediaType, body, is.log)
|
||||
if err != nil {
|
||||
return dig, err
|
||||
}
|
||||
|
||||
mDigest := godigest.FromBytes(body)
|
||||
refIsDigest := false
|
||||
d, err := godigest.Parse(reference)
|
||||
refIsDigest := true
|
||||
|
||||
if err == nil {
|
||||
if d.String() != mDigest.String() {
|
||||
is.log.Error().Str("actual", mDigest.String()).Str("expected", d.String()).
|
||||
Msg("manifest digest is not valid")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
mDigest, err := storage.GetAndValidateRequestDigest(body, reference, is.log)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrBadManifest) {
|
||||
return mDigest.String(), err
|
||||
}
|
||||
|
||||
refIsDigest = true
|
||||
refIsDigest = false
|
||||
}
|
||||
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// create a new descriptor
|
||||
desc := ispec.Descriptor{
|
||||
MediaType: mediaType, Size: int64(len(body)), Digest: mDigest,
|
||||
}
|
||||
|
||||
if !refIsDigest {
|
||||
desc.Annotations = map[string]string{ispec.AnnotationRefName: reference}
|
||||
}
|
||||
|
||||
updateIndex, oldDgst, err := storage.CheckIfIndexNeedsUpdate(&index, &desc, is.log)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !updateIndex {
|
||||
return desc.Digest.String(), nil
|
||||
}
|
||||
|
||||
var lockLatency time.Time
|
||||
@ -741,90 +480,8 @@ func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, /
|
||||
is.Lock(&lockLatency)
|
||||
defer is.Unlock(&lockLatency)
|
||||
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
|
||||
buf, err := os.ReadFile(path.Join(dir, "index.json"))
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
var index ispec.Index
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
|
||||
|
||||
return "", zerr.ErrRepoBadVersion
|
||||
}
|
||||
|
||||
updateIndex := true
|
||||
// create a new descriptor
|
||||
desc := ispec.Descriptor{
|
||||
MediaType: mediaType, Size: int64(len(body)), Digest: mDigest,
|
||||
}
|
||||
if !refIsDigest {
|
||||
desc.Annotations = map[string]string{ispec.AnnotationRefName: reference}
|
||||
}
|
||||
|
||||
var oldDgst godigest.Digest
|
||||
|
||||
for midx, manifest := range index.Manifests {
|
||||
if reference == manifest.Digest.String() {
|
||||
// nothing changed, so don't update
|
||||
desc = manifest
|
||||
updateIndex = false
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok && v == reference {
|
||||
if manifest.Digest.String() == mDigest.String() {
|
||||
// nothing changed, so don't update
|
||||
desc = manifest
|
||||
updateIndex = false
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// manifest contents have changed for the same tag,
|
||||
// so update index.json descriptor
|
||||
is.log.Info().
|
||||
Int64("old size", desc.Size).
|
||||
Int64("new size", int64(len(body))).
|
||||
Str("old digest", desc.Digest.String()).
|
||||
Str("new digest", mDigest.String()).
|
||||
Str("old mediaType", manifest.MediaType).
|
||||
Str("new mediaType", mediaType).
|
||||
Msg("updating existing tag with new manifest contents")
|
||||
|
||||
// changing media-type is disallowed!
|
||||
if manifest.MediaType != mediaType {
|
||||
err = zerr.ErrBadManifest
|
||||
is.log.Error().Err(err).
|
||||
Str("old mediaType", manifest.MediaType).
|
||||
Str("new mediaType", mediaType).Msg("cannot change media-type")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
desc = manifest
|
||||
oldDgst = manifest.Digest
|
||||
desc.Size = int64(len(body))
|
||||
desc.Digest = mDigest
|
||||
|
||||
index.Manifests = append(index.Manifests[:midx], index.Manifests[midx+1:]...)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !updateIndex {
|
||||
return desc.Digest.String(), nil
|
||||
}
|
||||
|
||||
// write manifest to "blobs"
|
||||
dir = path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
|
||||
dir := path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
|
||||
_ = ensureDir(dir, is.log)
|
||||
file := path.Join(dir, mDigest.Encoded())
|
||||
|
||||
@ -835,62 +492,38 @@ func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, /
|
||||
return "", err
|
||||
}
|
||||
|
||||
/* additionally, unmarshal an image index and for all manifests in that
|
||||
index, ensure that they do not have a name or they are not in other
|
||||
manifest indexes else GC can never clean them */
|
||||
if (mediaType == ispec.MediaTypeImageIndex) && (oldDgst != "") {
|
||||
otherImgIndexes := []ispec.Descriptor{}
|
||||
is.Unlock(&lockLatency)
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
if manifest.MediaType == ispec.MediaTypeImageIndex {
|
||||
otherImgIndexes = append(otherImgIndexes, manifest)
|
||||
}
|
||||
}
|
||||
|
||||
otherImgIndexes = append(otherImgIndexes, desc)
|
||||
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
|
||||
prunedManifests, err := pruneImageManifestsFromIndex(dir, oldDgst, index, otherImgIndexes, is.log)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
index.Manifests = prunedManifests
|
||||
err = storage.UpdateIndexWithPrunedImageManifests(is, &index, repo, desc, oldDgst, is.log)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
is.Lock(&lockLatency)
|
||||
|
||||
// now update "index.json"
|
||||
index.Manifests = append(index.Manifests, desc)
|
||||
dir = path.Join(is.rootDir, repo)
|
||||
file = path.Join(dir, "index.json")
|
||||
buf, err = json.Marshal(index)
|
||||
|
||||
buf, err := json.Marshal(index)
|
||||
if err := test.Error(err); err != nil {
|
||||
is.log.Error().Err(err).Str("file", file).Msg("unable to marshal JSON")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
is.Unlock(&lockLatency)
|
||||
|
||||
// apply linter only on images, not signatures
|
||||
if is.linter != nil {
|
||||
if mediaType == ispec.MediaTypeImageManifest &&
|
||||
// check that image manifest is not cosign signature
|
||||
!strings.HasPrefix(reference, "sha256-") &&
|
||||
!strings.HasSuffix(reference, remote.SignatureTagSuffix) {
|
||||
// lint new index with new manifest before writing to disk
|
||||
is.Unlock(&lockLatency)
|
||||
pass, err := is.linter.Lint(repo, mDigest, is)
|
||||
is.Lock(&lockLatency)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("linter error")
|
||||
pass, err := storage.ApplyLinter(is, is.linter, repo, desc)
|
||||
|
||||
return "", err
|
||||
}
|
||||
is.Lock(&lockLatency)
|
||||
|
||||
if !pass {
|
||||
return "", zerr.ErrImageLintAnnotations
|
||||
}
|
||||
}
|
||||
if !pass {
|
||||
is.log.Error().Err(err).Str("repo", repo).Str("reference", reference).Msg("linter didn't pass")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = is.writeFile(file, buf)
|
||||
@ -921,100 +554,29 @@ func (is *ImageStoreLocal) DeleteImageManifest(repo, reference string) error {
|
||||
return zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
isTag := false
|
||||
|
||||
// as per spec "reference" can be a digest and a tag
|
||||
dgst, err := godigest.Parse(reference)
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
is.log.Debug().Str("invalid digest: ", reference).Msg("storage: assuming tag")
|
||||
return err
|
||||
}
|
||||
|
||||
isTag = true
|
||||
manifestDesc, found := storage.RemoveManifestDescByReference(&index, reference)
|
||||
if !found {
|
||||
return zerr.ErrManifestNotFound
|
||||
}
|
||||
|
||||
err = storage.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
is.Lock(&lockLatency)
|
||||
defer is.Unlock(&lockLatency)
|
||||
|
||||
buf, err := os.ReadFile(path.Join(dir, "index.json"))
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
var index ispec.Index
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
found := false
|
||||
|
||||
isImageIndex := false
|
||||
|
||||
var manifest ispec.Descriptor
|
||||
|
||||
// we are deleting, so keep only those manifests that don't match
|
||||
outIndex := index
|
||||
outIndex.Manifests = []ispec.Descriptor{}
|
||||
|
||||
otherImgIndexes := []ispec.Descriptor{}
|
||||
|
||||
for _, manifest = range index.Manifests {
|
||||
if isTag {
|
||||
tag, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok && tag == reference {
|
||||
is.log.Debug().Str("deleting tag", tag).Msg("")
|
||||
|
||||
dgst = manifest.Digest
|
||||
|
||||
found = true
|
||||
|
||||
if manifest.MediaType == ispec.MediaTypeImageIndex {
|
||||
isImageIndex = true
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
} else if reference == manifest.Digest.String() {
|
||||
is.log.Debug().Str("deleting reference", reference).Msg("")
|
||||
found = true
|
||||
|
||||
if manifest.MediaType == ispec.MediaTypeImageIndex {
|
||||
isImageIndex = true
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
outIndex.Manifests = append(outIndex.Manifests, manifest)
|
||||
|
||||
if manifest.MediaType == ispec.MediaTypeImageIndex {
|
||||
otherImgIndexes = append(otherImgIndexes, manifest)
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return zerr.ErrManifestNotFound
|
||||
}
|
||||
|
||||
/* additionally, unmarshal an image index and for all manifests in that
|
||||
index, ensure that they do not have a name or they are not in other
|
||||
manifest indexes else GC can never clean them */
|
||||
if isImageIndex {
|
||||
prunedManifests, err := pruneImageManifestsFromIndex(dir, dgst, outIndex, otherImgIndexes, is.log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
outIndex.Manifests = prunedManifests
|
||||
}
|
||||
|
||||
// now update "index.json"
|
||||
dir = path.Join(is.rootDir, repo)
|
||||
file := path.Join(dir, "index.json")
|
||||
buf, err = json.Marshal(outIndex)
|
||||
|
||||
buf, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1033,8 +595,8 @@ func (is *ImageStoreLocal) DeleteImageManifest(repo, reference string) error {
|
||||
// e.g. 1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.
|
||||
toDelete := true
|
||||
|
||||
for _, manifest = range outIndex.Manifests {
|
||||
if dgst.String() == manifest.Digest.String() {
|
||||
for _, manifest := range index.Manifests {
|
||||
if manifestDesc.Digest.String() == manifest.Digest.String() {
|
||||
toDelete = false
|
||||
|
||||
break
|
||||
@ -1042,7 +604,7 @@ func (is *ImageStoreLocal) DeleteImageManifest(repo, reference string) error {
|
||||
}
|
||||
|
||||
if toDelete {
|
||||
p := path.Join(dir, "blobs", dgst.Algorithm().String(), dgst.Encoded())
|
||||
p := path.Join(dir, "blobs", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())
|
||||
|
||||
_ = os.Remove(p)
|
||||
}
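The loop above implements the rule from the comment: a manifest blob may only be removed when no remaining descriptor in index.json points at the same digest (for example two tags sharing one manifest). A small self-contained restatement of that check, for illustration only:

package sketch

import (
	godigest "github.com/opencontainers/go-digest"
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// manifestStillReferenced reports whether any descriptor left in the index
// still points at the given digest; when it does, the manifest blob is kept.
func manifestStillReferenced(index ispec.Index, dgst godigest.Digest) bool {
	for _, manifest := range index.Manifests {
		if manifest.Digest.String() == dgst.String() {
			return true
		}
	}

	return false
}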
|
||||
@ -1055,7 +617,7 @@ func (is *ImageStoreLocal) DeleteImageManifest(repo, reference string) error {
|
||||
// BlobUploadPath returns the upload path for a blob in this store.
|
||||
func (is *ImageStoreLocal) BlobUploadPath(repo, uuid string) string {
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
blobUploadPath := path.Join(dir, BlobUploadDir, uuid)
|
||||
blobUploadPath := path.Join(dir, storage.BlobUploadDir, uuid)
|
||||
|
||||
return blobUploadPath
|
||||
}
|
||||
@ -1781,27 +1343,14 @@ func (is *ImageStoreLocal) GetReferrers(repo, digest, artifactType string) ([]ar
|
||||
return nil, zerr.ErrBadBlobDigest
|
||||
}
|
||||
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
is.RLock(&lockLatency)
|
||||
defer is.RUnlock(&lockLatency)
|
||||
|
||||
buf, err := os.ReadFile(path.Join(dir, "index.json"))
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
return nil, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var index ispec.Index
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
found := false
|
||||
|
||||
result := []artifactspec.Descriptor{}
|
||||
@ -1813,8 +1362,7 @@ func (is *ImageStoreLocal) GetReferrers(repo, digest, artifactType string) ([]ar
|
||||
|
||||
p := path.Join(dir, "blobs", manifest.Digest.Algorithm().String(), manifest.Digest.Encoded())
|
||||
|
||||
buf, err = os.ReadFile(p)
|
||||
|
||||
buf, err := os.ReadFile(p)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("blob", p).Msg("failed to read manifest")
|
||||
|
||||
@ -1878,12 +1426,6 @@ func (is *ImageStoreLocal) writeFile(filename string, data []byte) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func IsSupportedMediaType(mediaType string) bool {
|
||||
return mediaType == ispec.MediaTypeImageIndex ||
|
||||
mediaType == ispec.MediaTypeImageManifest ||
|
||||
mediaType == artifactspec.MediaTypeArtifactManifest
|
||||
}
|
||||
|
||||
// utility routines
|
||||
|
||||
func ValidateHardLink(rootDir string) error {
|
||||
@ -1941,37 +1483,6 @@ func (is *ImageStoreLocal) garbageCollect(dir string, repo string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateManifest(imgStore *ImageStoreLocal, repo, reference,
|
||||
mediaType string, body []byte,
|
||||
) (string, error) {
|
||||
if mediaType == ispec.MediaTypeImageManifest {
|
||||
var manifest ispec.Manifest
|
||||
if err := json.Unmarshal(body, &manifest); err != nil {
|
||||
imgStore.log.Error().Err(err).Msg("unable to unmarshal JSON")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
if manifest.Config.MediaType == ispec.MediaTypeImageConfig {
|
||||
digest, err := imgStore.validateOCIManifest(repo, reference, &manifest)
|
||||
if err != nil {
|
||||
imgStore.log.Error().Err(err).Msg("invalid oci image manifest")
|
||||
|
||||
return digest, err
|
||||
}
|
||||
}
|
||||
} else if mediaType == artifactspec.MediaTypeArtifactManifest {
|
||||
var m notation.Descriptor
|
||||
if err := json.Unmarshal(body, &m); err != nil {
|
||||
imgStore.log.Error().Err(err).Msg("unable to unmarshal JSON")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func ifOlderThan(imgStore *ImageStoreLocal, repo string, delay time.Duration) casext.GCPolicy {
|
||||
return func(ctx context.Context, digest godigest.Digest) (bool, error) {
|
||||
blobPath := imgStore.BlobPath(repo, digest)
|
@ -1,7 +1,7 @@
|
||||
//go:build needprivileges
|
||||
// +build needprivileges
|
||||
|
||||
package storage_test
|
||||
package local_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -18,6 +18,7 @@ import (
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
"zotregistry.io/zot/pkg/log"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/storage/local"
|
||||
)
|
||||
|
||||
func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
|
||||
@ -26,7 +27,7 @@ func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
upload, err := imgStore.NewBlobUpload("dedupe1")
|
||||
So(err, ShouldBeNil)
|
@ -1,4 +1,4 @@
|
||||
package storage_test
|
||||
package local_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -27,11 +27,13 @@ import (
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
"zotregistry.io/zot/pkg/log"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/storage/local"
|
||||
"zotregistry.io/zot/pkg/test"
|
||||
)
|
||||
|
||||
const (
|
||||
tag = "1.0"
|
||||
tag = "1.0"
|
||||
repoName = "test"
|
||||
)
|
||||
|
||||
func TestStorageFSAPIs(t *testing.T) {
|
||||
@ -39,7 +41,7 @@ func TestStorageFSAPIs(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
true, log, metrics, nil)
|
||||
|
||||
Convey("Repo layout", t, func(c C) {
|
||||
@ -171,10 +173,10 @@ func TestGetReferrers(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
Convey("Get referrers", t, func(c C) {
|
||||
err := test.CopyFiles("../../test/data/zot-test", path.Join(dir, "zot-test"))
|
||||
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, "zot-test"))
|
||||
So(err, ShouldBeNil)
|
||||
body := []byte("this is a blob")
|
||||
digest := godigest.FromBytes(body)
|
||||
@ -221,7 +223,7 @@ func FuzzNewBlobUpload(f *testing.F) {
|
||||
t.Logf("Input argument is %s", data)
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
_, err := imgStore.NewBlobUpload(data)
|
||||
if err != nil {
|
||||
@ -241,7 +243,7 @@ func FuzzPutBlobChunk(f *testing.F) {
|
||||
t.Logf("Input argument is %s", data)
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
repoName := data
|
||||
uuid, err := imgStore.NewBlobUpload(repoName)
|
||||
@ -269,7 +271,7 @@ func FuzzPutBlobChunkStreamed(f *testing.F) {
|
||||
t.Logf("Input argument is %s", data)
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
repoName := data
|
||||
|
||||
@ -296,7 +298,7 @@ func FuzzGetBlobUpload(f *testing.F) {
|
||||
defer os.RemoveAll(dir)
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
_, err := imgStore.GetBlobUpload(data1, data2)
|
||||
if err != nil {
|
||||
@ -316,7 +318,7 @@ func FuzzTestPutGetImageManifest(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
|
||||
@ -362,7 +364,7 @@ func FuzzTestPutDeleteImageManifest(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
|
||||
@ -415,7 +417,7 @@ func FuzzTestDeleteImageManifest(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
digest, _, err := newRandomBlobForFuzz(data)
|
||||
if err != nil {
|
||||
@ -433,7 +435,7 @@ func FuzzTestDeleteImageManifest(f *testing.F) {
|
||||
|
||||
func FuzzDirExists(f *testing.F) {
|
||||
f.Fuzz(func(t *testing.T, data string) { //nolint: unusedparams
|
||||
_ = storage.DirExists(data)
|
||||
_ = local.DirExists(data)
|
||||
})
|
||||
}
|
||||
|
||||
@ -445,7 +447,7 @@ func FuzzInitRepo(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
err := imgStore.InitRepo(data)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
@ -464,7 +466,7 @@ func FuzzInitValidateRepo(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
err := imgStore.InitRepo(data)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
@ -490,7 +492,7 @@ func FuzzGetImageTags(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
_, err := imgStore.GetImageTags(data)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrRepoNotFound) || isKnownErr(err) {
|
||||
@ -509,7 +511,7 @@ func FuzzBlobUploadPath(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
_ = imgStore.BlobUploadPath(repo, uuid)
|
||||
})
|
||||
@ -523,7 +525,7 @@ func FuzzBlobUploadInfo(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
repo := data
|
||||
|
||||
_, err := imgStore.BlobUploadInfo(repo, uuid)
|
||||
@ -543,7 +545,7 @@ func FuzzTestGetImageManifest(f *testing.F) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
repoName := data
|
||||
|
||||
@ -566,7 +568,7 @@ func FuzzFinishBlobUpload(f *testing.F) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
repoName := data
|
||||
|
||||
@ -610,7 +612,7 @@ func FuzzFullBlobUpload(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
ldigest, lblob, err := newRandomBlobForFuzz(data)
|
||||
if err != nil {
|
||||
@ -635,7 +637,7 @@ func FuzzDedupeBlob(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
blobDigest := godigest.FromString(data)
|
||||
|
||||
@ -671,7 +673,7 @@ func FuzzDeleteBlobUpload(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
uuid, err := imgStore.NewBlobUpload(repoName)
|
||||
if err != nil {
|
||||
@ -697,7 +699,7 @@ func FuzzBlobPath(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_ = imgStore.BlobPath(repoName, digest)
|
||||
@ -713,7 +715,7 @@ func FuzzCheckBlob(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
@ -739,7 +741,7 @@ func FuzzGetBlob(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
@ -772,7 +774,7 @@ func FuzzDeleteBlob(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
@ -802,7 +804,7 @@ func FuzzGetIndexContent(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
@ -832,7 +834,7 @@ func FuzzGetBlobContent(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
@ -861,9 +863,9 @@ func FuzzGetReferrers(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
err := test.CopyFiles("../../test/data/zot-test", path.Join(dir, "zot-test"))
|
||||
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, "zot-test"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -916,7 +918,7 @@ func FuzzRunGCRepo(f *testing.F) {
|
||||
dir := t.TempDir()
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
|
||||
if err := imgStore.RunGCRepo(data); err != nil {
|
||||
t.Error(err)
|
||||
@ -929,7 +931,7 @@ func TestDedupeLinks(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
|
||||
Convey("Dedupe", t, func(c C) {
|
||||
@ -1101,7 +1103,7 @@ func TestDedupe(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
il := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
il := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
|
||||
|
||||
So(il.DedupeBlob("", "", ""), ShouldNotBeNil)
|
||||
})
|
||||
@ -1116,10 +1118,10 @@ func TestNegativeCases(t *testing.T) {
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
|
||||
So(storage.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
So(local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
true, log, metrics, nil), ShouldNotBeNil)
|
||||
if os.Geteuid() != 0 {
|
||||
So(storage.NewImageStore("/deadBEEF", true, storage.DefaultGCDelay,
|
||||
So(local.NewImageStore("/deadBEEF", true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil), ShouldBeNil)
|
||||
}
|
||||
})
|
||||
@ -1129,7 +1131,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
|
||||
err := os.Chmod(dir, 0o000) // remove all perms
|
||||
@ -1169,7 +1171,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
true, log, metrics, nil)
|
||||
|
||||
So(imgStore, ShouldNotBeNil)
|
||||
@ -1276,7 +1278,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
})
|
||||
|
||||
Convey("Invalid get image tags", t, func(c C) {
|
||||
var ilfs storage.ImageStoreLocal
|
||||
var ilfs local.ImageStoreLocal
|
||||
_, err := ilfs.GetImageTags("test")
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
@ -1284,7 +1286,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
|
||||
So(imgStore, ShouldNotBeNil)
|
||||
@ -1300,7 +1302,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
})
|
||||
|
||||
Convey("Invalid get image manifest", t, func(c C) {
|
||||
var ilfs storage.ImageStoreLocal
|
||||
var ilfs local.ImageStoreLocal
|
||||
_, _, _, err := ilfs.GetImageManifest("test", "")
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
@ -1308,7 +1310,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
|
||||
true, log, metrics, nil)
|
||||
|
||||
So(imgStore, ShouldNotBeNil)
|
||||
@ -1350,7 +1352,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
|
||||
So(imgStore, ShouldNotBeNil)
|
||||
@ -1419,7 +1421,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
ok := storage.DirExists(filePath)
|
||||
ok := local.DirExists(filePath)
|
||||
So(ok, ShouldBeFalse)
|
||||
})
|
||||
|
||||
@ -1427,7 +1429,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
filePath := path.Join(dir, "hi \255")
|
||||
ok := storage.DirExists(filePath)
|
||||
ok := local.DirExists(filePath)
|
||||
So(ok, ShouldBeFalse)
|
||||
})
|
||||
|
||||
@ -1440,7 +1442,7 @@ func TestNegativeCases(t *testing.T) {
|
||||
}
|
||||
}
|
||||
path := builder.String()
|
||||
ok := storage.DirExists(path)
|
||||
ok := local.DirExists(path)
|
||||
So(ok, ShouldBeFalse)
|
||||
})
|
||||
}
|
||||
@ -1462,7 +1464,7 @@ func TestHardLink(t *testing.T) {
|
||||
}
|
||||
defer os.RemoveAll(randomDir)
|
||||
|
||||
err := storage.ValidateHardLink(randomDir)
|
||||
err := local.ValidateHardLink(randomDir)
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
Convey("Test that ValidateHardLink returns error if rootDir is a file", t, func() {
|
||||
@ -1474,13 +1476,13 @@ func TestHardLink(t *testing.T) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = storage.ValidateHardLink(filePath)
|
||||
err = local.ValidateHardLink(filePath)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
Convey("Test if filesystem supports hardlink", t, func() {
|
||||
dir := t.TempDir()
|
||||
|
||||
err := storage.ValidateHardLink(dir)
|
||||
err := local.ValidateHardLink(dir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = os.WriteFile(path.Join(dir, "hardtest.txt"), []byte("testing hard link code"), 0o644) //nolint: gosec
|
||||
@ -1516,7 +1518,7 @@ func TestInjectWriteFile(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
|
||||
Convey("Failure path1", func() {
|
||||
@ -1547,7 +1549,7 @@ func TestInjectWriteFile(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, false, log, metrics, nil)
|
||||
|
||||
Convey("Failure path not reached", func() {
|
||||
@ -1565,7 +1567,7 @@ func TestGarbageCollect(t *testing.T) {
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
|
||||
Convey("Garbage collect with default/long delay", func() {
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
repoName := "gc-long"
|
||||
|
||||
@ -1633,7 +1635,7 @@ func TestGarbageCollect(t *testing.T) {
|
||||
})
|
||||
|
||||
Convey("Garbage collect with short delay", func() {
|
||||
imgStore := storage.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
|
||||
repoName := "gc-short"
|
||||
|
||||
// upload orphan blob
|
||||
@ -1729,7 +1731,7 @@ func TestGarbageCollect(t *testing.T) {
|
||||
|
||||
Convey("Garbage collect with dedupe", func() {
|
||||
// garbage-collect is repo-local and dedupe is global and they can interact in strange ways
|
||||
imgStore := storage.NewImageStore(dir, true, 5*time.Second, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, 5*time.Second, true, true, log, metrics, nil)
|
||||
|
||||
// first upload an image to the first repo and wait for GC timeout
|
||||
|
||||
@ -1930,16 +1932,16 @@ func TestGarbageCollectForImageStore(t *testing.T) {
|
||||
|
||||
log := log.NewLogger("debug", logFile.Name())
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
|
||||
repoName := "gc-all-repos-short"
|
||||
|
||||
err := test.CopyFiles("../../test/data/zot-test", path.Join(dir, repoName))
|
||||
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var manifestDigest godigest.Digest
|
||||
manifestDigest, _, _ = test.GetOciLayoutDigests("../../test/data/zot-test")
|
||||
manifestDigest, _, _ = test.GetOciLayoutDigests("../../../test/data/zot-test")
|
||||
err = os.Remove(path.Join(dir, repoName, "blobs/sha256", manifestDigest.Encoded()))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -1963,10 +1965,10 @@ func TestGarbageCollectForImageStore(t *testing.T) {
|
||||
|
||||
log := log.NewLogger("debug", logFile.Name())
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
|
||||
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
|
||||
repoName := "gc-all-repos-short"
|
||||
|
||||
err := test.CopyFiles("../../test/data/zot-test", path.Join(dir, repoName))
|
||||
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -2009,7 +2011,7 @@ func TestInitRepo(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
|
||||
err := os.Mkdir(path.Join(dir, "test-dir"), 0o000)
|
||||
@ -2026,7 +2028,7 @@ func TestValidateRepo(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
|
||||
err := os.Mkdir(path.Join(dir, "test-dir"), 0o000)
|
||||
@ -2043,7 +2045,7 @@ func TestGetRepositoriesError(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil,
|
||||
)
|
||||
|
||||
@ -2063,18 +2065,18 @@ func TestGetNextRepository(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil,
|
||||
)
|
||||
firstRepoName := "repo1"
|
||||
secondRepoName := "repo2"
|
||||
|
||||
err := test.CopyFiles("../../test/data/zot-test", path.Join(dir, firstRepoName))
|
||||
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, firstRepoName))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = test.CopyFiles("../../test/data/zot-test", path.Join(dir, secondRepoName))
|
||||
err = test.CopyFiles("../../../test/data/zot-test", path.Join(dir, secondRepoName))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -2100,7 +2102,7 @@ func TestPutBlobChunkStreamed(t *testing.T) {
|
||||
|
||||
log := log.Logger{Logger: zerolog.New(os.Stdout)}
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
|
||||
uuid, err := imgStore.NewBlobUpload("test")
|
||||
@ -2124,7 +2126,7 @@ func TestPullRange(t *testing.T) {
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
|
||||
Convey("Negative cases", func() {
|
||||
imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
|
||||
true, true, log, metrics, nil)
|
||||
repoName := "pull-range"
|
||||
|
@ -11,7 +11,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -24,7 +23,6 @@ import (
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sigstore/cosign/pkg/oci/remote"
|
||||
zerr "zotregistry.io/zot/errors"
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
zlog "zotregistry.io/zot/pkg/log"
|
||||
@ -34,8 +32,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
RLOCK = "RLock"
|
||||
RWLOCK = "RWLock"
|
||||
CacheDBName = "s3_cache"
|
||||
)
|
||||
|
||||
@ -115,7 +111,7 @@ func (is *ObjectStorage) RUnlock(lockStart *time.Time) {
|
||||
lockEnd := time.Now()
|
||||
// includes time spent in acquiring and holding a lock
|
||||
latency := lockEnd.Sub(*lockStart)
|
||||
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), RLOCK) // histogram
|
||||
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storage.RLOCK) // histogram
|
||||
}
|
||||
|
||||
// Lock write-lock.
|
||||
@ -132,7 +128,7 @@ func (is *ObjectStorage) Unlock(lockStart *time.Time) {
|
||||
lockEnd := time.Now()
|
||||
// includes time spent in acquiring and holding a lock
|
||||
latency := lockEnd.Sub(*lockStart)
|
||||
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), RWLOCK) // histogram
|
||||
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storage.RWLOCK) // histogram
|
||||
}
|
||||
|
||||
func (is *ObjectStorage) initRepo(name string) error {
|
||||
@ -305,88 +301,36 @@ func (is *ObjectStorage) GetImageTags(repo string) ([]string, error) {
|
||||
return nil, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
buf, err := is.GetIndexContent(repo)
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var index ispec.Index
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
|
||||
|
||||
return nil, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
tags := make([]string, 0)
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
tags = append(tags, v)
|
||||
}
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
return storage.GetTagsByIndex(index), nil
|
||||
}
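The removed loop above shows what the new storage.GetTagsByIndex helper is expected to do: collect the ref-name annotations of all descriptors in the index. A hedged sketch, with the body inferred from the deleted code and an illustrative name:

package sketch

import (
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// getTagsByIndexSketch returns the tags recorded as ref-name annotations on
// the descriptors of an image index.
func getTagsByIndexSketch(index ispec.Index) []string {
	tags := make([]string, 0)

	for _, manifest := range index.Manifests {
		if tag, ok := manifest.Annotations[ispec.AnnotationRefName]; ok {
			tags = append(tags, tag)
		}
	}

	return tags
}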
|
||||
|
||||
// GetImageManifest returns the image manifest of an image in the specific repository.
|
||||
func (is *ObjectStorage) GetImageManifest(repo, reference string) ([]byte, string, string, error) {
|
||||
var lockLatency time.Time
|
||||
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
|
||||
return nil, "", "", zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
buf, err := is.GetIndexContent(repo)
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
var index ispec.Index
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
|
||||
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
found := false
|
||||
|
||||
var digest godigest.Digest
|
||||
|
||||
mediaType := ""
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
if reference == manifest.Digest.String() {
|
||||
digest = manifest.Digest
|
||||
mediaType = manifest.MediaType
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok && v == reference {
|
||||
digest = manifest.Digest
|
||||
mediaType = manifest.MediaType
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
return nil, "", "", zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
manifestDesc, found := storage.GetManifestDescByReference(index, reference)
|
||||
if !found {
|
||||
return nil, "", "", zerr.ErrManifestNotFound
|
||||
}
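storage.GetManifestDescByReference replaces the removed lookup loop above; judging from the deleted code, it resolves a reference that may be either a manifest digest or a tag annotation. A minimal sketch under that assumption (the name and exact shape are illustrative):

package sketch

import (
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// getManifestDescByReferenceSketch finds the descriptor whose digest or
// ref-name annotation matches the given reference.
func getManifestDescByReferenceSketch(index ispec.Index, reference string) (ispec.Descriptor, bool) {
	for _, manifest := range index.Manifests {
		if manifest.Digest.String() == reference {
			return manifest, true
		}

		if tag, ok := manifest.Annotations[ispec.AnnotationRefName]; ok && tag == reference {
			return manifest, true
		}
	}

	return ispec.Descriptor{}, false
}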
|
||||
|
||||
manifestPath := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
|
||||
|
||||
is.RLock(&lockLatency)
|
||||
defer is.RUnlock(&lockLatency)
|
||||
|
||||
buf, err = is.store.GetContent(context.Background(), manifestPath)
|
||||
buf, err := is.GetBlobContent(repo, manifestDesc.Digest.String())
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("blob", manifestPath).Msg("failed to read manifest")
|
||||
if errors.Is(err, zerr.ErrBlobNotFound) {
|
||||
return nil, "", "", zerr.ErrManifestNotFound
|
||||
}
|
||||
|
||||
return nil, "", "", err
|
||||
}
|
||||
@ -400,101 +344,7 @@ func (is *ObjectStorage) GetImageManifest(repo, reference string) ([]byte, strin
|
||||
|
||||
monitoring.IncDownloadCounter(is.metrics, repo)
|
||||
|
||||
return buf, digest.String(), mediaType, nil
|
||||
}
|
||||
|
||||
/**
|
||||
before an image index manifest is pushed to a repo, its constituent manifests
|
||||
are pushed first, so when updating/removing this image index manifest, we also
|
||||
need to determine if there are other image index manifests which refer to the
|
||||
same constituent manifests so that they can be garbage-collected correctly
|
||||
|
||||
pruneImageManifestsFromIndex is a helper routine to achieve this.
|
||||
*/
|
||||
func (is *ObjectStorage) pruneImageManifestsFromIndex(dir string, digest godigest.Digest, // nolint: gocyclo
|
||||
outIndex ispec.Index, otherImgIndexes []ispec.Descriptor, log zerolog.Logger,
|
||||
) ([]ispec.Descriptor, error) {
|
||||
indexPath := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
|
||||
|
||||
buf, err := is.store.GetContent(context.Background(), indexPath)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var imgIndex ispec.Index
|
||||
if err := json.Unmarshal(buf, &imgIndex); err != nil {
|
||||
log.Error().Err(err).Str("path", indexPath).Msg("invalid JSON")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inUse := map[string]uint{}
|
||||
|
||||
for _, manifest := range imgIndex.Manifests {
|
||||
inUse[manifest.Digest.Encoded()]++
|
||||
}
|
||||
|
||||
for _, otherIndex := range otherImgIndexes {
|
||||
indexPath := path.Join(dir, "blobs", otherIndex.Digest.Algorithm().String(), otherIndex.Digest.Encoded())
|
||||
|
||||
buf, err := is.store.GetContent(context.Background(), indexPath)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var oindex ispec.Index
|
||||
if err := json.Unmarshal(buf, &oindex); err != nil {
|
||||
log.Error().Err(err).Str("path", indexPath).Msg("invalid JSON")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, omanifest := range oindex.Manifests {
|
||||
_, ok := inUse[omanifest.Digest.Encoded()]
|
||||
if ok {
|
||||
inUse[omanifest.Digest.Encoded()]++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prunedManifests := []ispec.Descriptor{}
|
||||
|
||||
// for all manifests in the index, skip those that either have a tag or
|
||||
// are used in other imgIndexes
|
||||
for _, outManifest := range outIndex.Manifests {
|
||||
if outManifest.MediaType != ispec.MediaTypeImageManifest {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
_, ok := outManifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
count, ok := inUse[outManifest.Digest.Encoded()]
|
||||
if !ok {
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
// this manifest is in use in other image indexes
|
||||
prunedManifests = append(prunedManifests, outManifest)
|
||||
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return prunedManifests, nil
|
||||
return buf, manifestDesc.Digest.String(), manifestDesc.MediaType, nil
|
||||
}
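The comment block and helper removed above describe the pruning rule that now lives behind storage.UpdateIndexWithPrunedImageManifests: a constituent manifest of an updated or deleted image index is only dropped when it is untagged and not referenced by any other image index in the repo, so that GC can eventually reclaim it. A self-contained sketch of that reference-counting idea; the names here are illustrative, not the actual helpers:

package sketch

import (
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// keepReferencedManifestsSketch returns the descriptors that should survive
// pruning: non-image-manifest entries, tagged manifests, and manifests still
// referenced by one of the other image indexes in the repository.
func keepReferencedManifestsSketch(manifests []ispec.Descriptor, otherIndexes []ispec.Index) []ispec.Descriptor {
	inUse := map[string]int{}

	for _, idx := range otherIndexes {
		for _, m := range idx.Manifests {
			inUse[m.Digest.Encoded()]++
		}
	}

	kept := []ispec.Descriptor{}

	for _, m := range manifests {
		if m.MediaType != ispec.MediaTypeImageManifest {
			kept = append(kept, m)

			continue
		}

		if _, tagged := m.Annotations[ispec.AnnotationRefName]; tagged {
			kept = append(kept, m)

			continue
		}

		if inUse[m.Digest.Encoded()] > 0 {
			kept = append(kept, m)
		}
	}

	return kept
}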
|
||||
|
||||
// PutImageManifest adds an image manifest to the repository.
|
||||
@ -507,148 +357,52 @@ func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //n
|
||||
return "", err
|
||||
}
|
||||
|
||||
// validate the manifest
|
||||
if !storage.IsSupportedMediaType(mediaType) {
|
||||
is.log.Debug().Interface("actual", mediaType).
|
||||
Msg("bad manifest media type")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
dig, err := storage.ValidateManifest(is, repo, reference, mediaType, body, is.log)
|
||||
if err != nil {
|
||||
return dig, err
|
||||
}
|
||||
|
||||
if len(body) == 0 {
|
||||
is.log.Debug().Int("len", len(body)).Msg("invalid body length")
|
||||
refIsDigest := true
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
var imageManifest ispec.Manifest
|
||||
if err := json.Unmarshal(body, &imageManifest); err != nil {
|
||||
is.log.Error().Err(err).Msg("unable to unmarshal JSON")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
if imageManifest.SchemaVersion != storage.SchemaVersion {
|
||||
is.log.Error().Int("SchemaVersion", imageManifest.SchemaVersion).Msg("invalid manifest")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
for _, l := range imageManifest.Layers {
|
||||
digest := l.Digest
|
||||
blobPath := is.BlobPath(repo, digest)
|
||||
is.log.Info().Str("blobPath", blobPath).Str("reference", reference).Msg("manifest layers")
|
||||
|
||||
if _, err := is.store.Stat(context.Background(), blobPath); err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to find blob")
|
||||
|
||||
return digest.String(), zerr.ErrBlobNotFound
|
||||
}
|
||||
}
|
||||
|
||||
mDigest := godigest.FromBytes(body)
|
||||
refIsDigest := false
|
||||
dgst, err := godigest.Parse(reference)
|
||||
|
||||
if err == nil {
|
||||
if dgst.String() != mDigest.String() {
|
||||
is.log.Error().Str("actual", mDigest.String()).Str("expected", dgst.String()).
|
||||
Msg("manifest digest is not valid")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
mDigest, err := storage.GetAndValidateRequestDigest(body, reference, is.log)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrBadManifest) {
|
||||
return mDigest.String(), err
|
||||
}
|
||||
|
||||
refIsDigest = true
|
||||
refIsDigest = false
|
||||
}
|
||||
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
|
||||
buf, err := is.GetIndexContent(repo)
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// create a new descriptor
|
||||
desc := ispec.Descriptor{
|
||||
MediaType: mediaType, Size: int64(len(body)), Digest: mDigest,
|
||||
}
|
||||
|
||||
if !refIsDigest {
|
||||
desc.Annotations = map[string]string{ispec.AnnotationRefName: reference}
|
||||
}
|
||||
|
||||
updateIndex, oldDgst, err := storage.CheckIfIndexNeedsUpdate(&index, &desc, is.log)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !updateIndex {
|
||||
return desc.Digest.String(), nil
|
||||
}
|
||||
|
||||
var lockLatency time.Time
|
||||
|
||||
is.Lock(&lockLatency)
|
||||
defer is.Unlock(&lockLatency)
|
||||
|
||||
var index ispec.Index
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
|
||||
|
||||
return "", zerr.ErrRepoBadVersion
|
||||
}
|
||||
|
||||
updateIndex := true
|
||||
// create a new descriptor
|
||||
desc := ispec.Descriptor{
|
||||
MediaType: mediaType, Size: int64(len(body)), Digest: mDigest,
|
||||
Platform: &ispec.Platform{Architecture: "amd64", OS: "linux"},
|
||||
}
|
||||
if !refIsDigest {
|
||||
desc.Annotations = map[string]string{ispec.AnnotationRefName: reference}
|
||||
}
|
||||
|
||||
var oldDgst godigest.Digest
|
||||
|
||||
for midx, manifest := range index.Manifests {
|
||||
if reference == manifest.Digest.String() {
|
||||
// nothing changed, so don't update
|
||||
desc = manifest
|
||||
updateIndex = false
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
v, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if ok && v == reference {
|
||||
if manifest.Digest.String() == mDigest.String() {
|
||||
// nothing changed, so don't update
|
||||
desc = manifest
|
||||
updateIndex = false
|
||||
|
||||
break
|
||||
}
|
||||
// manifest contents have changed for the same tag,
|
||||
// so update index.json descriptor
|
||||
|
||||
is.log.Info().
|
||||
Int64("old size", desc.Size).
|
||||
Int64("new size", int64(len(body))).
|
||||
Str("old digest", desc.Digest.String()).
|
||||
Str("new digest", mDigest.String()).
|
||||
Str("old digest", desc.Digest.String()).
|
||||
Str("new digest", mDigest.String()).
|
||||
Msg("updating existing tag with new manifest contents")
|
||||
|
||||
// changing media-type is disallowed!
|
||||
if manifest.MediaType != mediaType {
|
||||
err = zerr.ErrBadManifest
|
||||
is.log.Error().Err(err).
|
||||
Str("old mediaType", manifest.MediaType).
|
||||
Str("new mediaType", mediaType).Msg("cannot change media-type")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
desc = manifest
|
||||
oldDgst = manifest.Digest
|
||||
desc.Size = int64(len(body))
|
||||
desc.Digest = mDigest
|
||||
|
||||
index.Manifests = append(index.Manifests[:midx], index.Manifests[midx+1:]...)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !updateIndex {
|
||||
return desc.Digest.String(), nil
|
||||
}
|
||||
|
||||
// write manifest to "blobs"
|
||||
dir = path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
|
||||
dir := path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
|
||||
manifestPath := path.Join(dir, mDigest.Encoded())
|
||||
|
||||
if err = is.store.PutContent(context.Background(), manifestPath, body); err != nil {
|
||||
@ -657,60 +411,34 @@ func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //n
|
||||
return "", err
|
||||
}
|
||||
|
||||
/* additionally, unmarshal an image index and for all manifests in that
|
||||
index, ensure that they do not have a name or they are not in other
|
||||
manifest indexes else GC can never clean them */
|
||||
if (mediaType == ispec.MediaTypeImageIndex) && (oldDgst != "") {
|
||||
otherImgIndexes := []ispec.Descriptor{}
|
||||
is.Unlock(&lockLatency)
|
||||
|
||||
for _, manifest := range index.Manifests {
|
||||
if manifest.MediaType == ispec.MediaTypeImageIndex {
|
||||
otherImgIndexes = append(otherImgIndexes, manifest)
|
||||
}
|
||||
}
|
||||
|
||||
otherImgIndexes = append(otherImgIndexes, desc)
|
||||
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
|
||||
prunedManifests, err := is.pruneImageManifestsFromIndex(dir, oldDgst, index, otherImgIndexes, is.log)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
index.Manifests = prunedManifests
|
||||
err = storage.UpdateIndexWithPrunedImageManifests(is, &index, repo, desc, oldDgst, is.log)
|
||||
is.Lock(&lockLatency)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// now update "index.json"
|
||||
index.Manifests = append(index.Manifests, desc)
|
||||
dir = path.Join(is.rootDir, repo)
|
||||
indexPath := path.Join(dir, "index.json")
|
||||
buf, err = json.Marshal(index)
|
||||
|
||||
buf, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
is.Unlock(&lockLatency)
|
||||
// apply linter only on images, not signatures
|
||||
if is.linter != nil {
|
||||
if mediaType == ispec.MediaTypeImageManifest &&
|
||||
// check that image manifest is not cosign signature
|
||||
!strings.HasPrefix(reference, "sha256-") &&
|
||||
!strings.HasSuffix(reference, remote.SignatureTagSuffix) {
|
||||
// lint new index with new manifest before writing to disk
|
||||
pass, err := is.linter.Lint(repo, mDigest, is)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("linter error")
|
||||
pass, err := storage.ApplyLinter(is, is.linter, repo, desc)
|
||||
is.Lock(&lockLatency)
|
||||
if !pass {
|
||||
is.log.Error().Err(err).Str("repo", repo).Str("reference", reference).Msg("linter didn't pass")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !pass {
|
||||
return "", zerr.ErrImageLintAnnotations
|
||||
}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err = is.store.PutContent(context.Background(), indexPath, buf); err != nil {
|
||||
@ -734,98 +462,29 @@ func (is *ObjectStorage) DeleteImageManifest(repo, reference string) error {
return zerr.ErrRepoNotFound
}

isTag := false

// as per spec "reference" can only be a digest and not a tag
dgst, err := godigest.Parse(reference)
if err != nil {
is.log.Debug().Str("invalid digest: ", reference).Msg("storage: assuming tag")

isTag = true
}

buf, err := is.GetIndexContent(repo)
index, err := storage.GetIndex(is, repo, is.log)
if err != nil {
return err
}

var index ispec.Index
if err := json.Unmarshal(buf, &index); err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")

return err
}

found := false

isImageIndex := false

var manifest ispec.Descriptor

// we are deleting, so keep only those manifests that don't match
outIndex := index
outIndex.Manifests = []ispec.Descriptor{}

otherImgIndexes := []ispec.Descriptor{}

for _, manifest = range index.Manifests {
if isTag {
tag, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok && tag == reference {
is.log.Debug().Str("deleting tag", tag).Msg("")

dgst = manifest.Digest

found = true

if manifest.MediaType == ispec.MediaTypeImageIndex {
isImageIndex = true
}

continue
}
} else if reference == manifest.Digest.String() {
is.log.Debug().Str("deleting reference", reference).Msg("")
found = true

if manifest.MediaType == ispec.MediaTypeImageIndex {
isImageIndex = true
}

continue
}

outIndex.Manifests = append(outIndex.Manifests, manifest)

if manifest.MediaType == ispec.MediaTypeImageIndex {
otherImgIndexes = append(otherImgIndexes, manifest)
}
}

manifestDesc, found := storage.RemoveManifestDescByReference(&index, reference)
if !found {
return zerr.ErrManifestNotFound
}

err = storage.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)
if err != nil {
return err
}

is.Lock(&lockLatency)
defer is.Unlock(&lockLatency)

/* additionally, unmarshal an image index and for all manifests in that
index, ensure that they do not have a name or they are not in other
manifest indexes else GC can never clean them */
if isImageIndex {
prunedManifests, err := is.pruneImageManifestsFromIndex(dir, dgst, outIndex, otherImgIndexes, is.log)
if err != nil {
return err
}

outIndex.Manifests = prunedManifests
}

// now update "index.json"
dir = path.Join(is.rootDir, repo)
file := path.Join(dir, "index.json")
buf, err = json.Marshal(outIndex)

buf, err := json.Marshal(index)
if err != nil {
return err
}
@ -840,8 +499,8 @@ func (is *ObjectStorage) DeleteImageManifest(repo, reference string) error {
// e.g. 1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.
toDelete := true

for _, manifest = range outIndex.Manifests {
if dgst.String() == manifest.Digest.String() {
for _, manifest := range index.Manifests {
if manifestDesc.Digest.String() == manifest.Digest.String() {
toDelete = false

break
@ -849,7 +508,7 @@ func (is *ObjectStorage) DeleteImageManifest(repo, reference string) error {
}

if toDelete {
p := path.Join(dir, "blobs", dgst.Algorithm().String(), dgst.Encoded())
p := path.Join(dir, "blobs", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())

err = is.store.Delete(context.Background(), p)
if err != nil {
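Again purely for illustration, the delete flow above (resolve the reference as tag or digest, drop the matching descriptor, then decide whether the manifest blob is still referenced by another tag) can be sketched stand-alone as follows. removeManifest and blobPath are hypothetical helpers written for this sketch; they are not the storage.RemoveManifestDescByReference helper the commit introduces.

package example

import (
	"path"

	godigest "github.com/opencontainers/go-digest"
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// removeManifest drops the descriptor matching reference (tag or digest) from
// the index and reports whether the removed digest is still referenced by
// another entry, i.e. whether the blob must be kept.
func removeManifest(index *ispec.Index, reference string) (removed ispec.Descriptor, found, blobStillUsed bool) {
	_, digestErr := godigest.Parse(reference)
	isTag := digestErr != nil // per the dist spec, anything that does not parse as a digest is treated as a tag

	kept := []ispec.Descriptor{}
	for _, m := range index.Manifests {
		matchesTag := isTag && m.Annotations[ispec.AnnotationRefName] == reference
		matchesDigest := !isTag && m.Digest.String() == reference

		if !found && (matchesTag || matchesDigest) {
			removed, found = m, true
			continue
		}
		kept = append(kept, m)
	}
	index.Manifests = kept

	// e.g. tags 1.0.1 and 1.0.2 may point at the same digest: keep the blob then.
	for _, m := range index.Manifests {
		if m.Digest == removed.Digest {
			blobStillUsed = true
		}
	}

	return removed, found, blobStillUsed
}

// blobPath is where the manifest blob would live if it does need deleting,
// matching the path layout used in the hunk above.
func blobPath(repoDir string, dgst godigest.Digest) string {
	return path.Join(repoDir, "blobs", dgst.Algorithm().String(), dgst.Encoded())
}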
@ -17,6 +17,7 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/local"
)

const (
@ -30,7 +31,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {

metrics := monitoring.NewMetricsServer(false, log)

imgStore := storage.NewImageStore(dir, true, storage.DefaultGCDelay,
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)

Convey("Scrub only one repo", t, func(c C) {
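The tests throughout this commit switch to the relocated constructor. A hedged sketch of such a call in isolation is shown below; the meanings of the positional arguments (GC enabled, GC delay, dedupe, commit) are inferred from the call sites in these hunks, and the assumption that the constructor returns the storage.ImageStore interface is mine, not stated in the diff.

package example

import (
	"os"

	"github.com/rs/zerolog"

	"zotregistry.io/zot/pkg/extensions/monitoring"
	zlog "zotregistry.io/zot/pkg/log"
	"zotregistry.io/zot/pkg/storage"
	"zotregistry.io/zot/pkg/storage/local"
)

// newLocalStore builds a filesystem-backed image store the same way the test
// above does: GC on, default GC delay, dedupe and commit enabled, no linter.
func newLocalStore(rootDir string) storage.ImageStore {
	log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
	metrics := monitoring.NewMetricsServer(false, log)

	return local.NewImageStore(rootDir, true, storage.DefaultGCDelay,
		true, true, log, metrics, nil)
}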
pkg/storage/storage_controller.go (new file, 46 lines)
@ -0,0 +1,46 @@
package storage

import (
"fmt"
"strings"
)

type StoreController struct {
DefaultStore ImageStore
SubStore map[string]ImageStore
}

// BlobUpload models an upload request.
type BlobUpload struct {
StoreName string
ID string
}

func getRoutePrefix(name string) string {
names := strings.SplitN(name, "/", 2) //nolint:gomnd

if len(names) != 2 { //nolint:gomnd
// it means route is of global storage e.g "centos:latest"
if len(names) == 1 {
return "/"
}
}

return fmt.Sprintf("/%s", names[0])
}

func (sc StoreController) GetImageStore(name string) ImageStore {
if sc.SubStore != nil {
// a SubStore map is configured, so pick the matching image store by splitting the repo name
prefixName := getRoutePrefix(name)

imgStore, ok := sc.SubStore[prefixName]
if !ok {
imgStore = sc.DefaultStore
}

return imgStore
}

return sc.DefaultStore
}
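A short usage sketch of the new StoreController (illustrative only, not part of the commit): with a sub-store mounted under "/a", a repo name such as "a/busybox" resolves to that sub-store via getRoutePrefix, while a single-component name like "centos" falls back to the default store. The two stores are left abstract here; in practice they would be concrete ImageStore implementations such as those built with local.NewImageStore.

package example

import (
	"zotregistry.io/zot/pkg/storage"
)

// routeExample shows how GetImageStore picks a backend by route prefix.
func routeExample(defaultStore, subStoreA storage.ImageStore) {
	sc := storage.StoreController{
		DefaultStore: defaultStore,
		SubStore: map[string]storage.ImageStore{
			"/a": subStoreA,
		},
	}

	_ = sc.GetImageStore("a/busybox") // prefix "/a" -> subStoreA
	_ = sc.GetImageStore("centos")    // no prefix   -> defaultStore
}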
@ -27,6 +27,7 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/storage/s3"
"zotregistry.io/zot/pkg/test"
"zotregistry.io/zot/pkg/test/mocks"
@ -121,7 +122,7 @@ func TestStorageAPIs(t *testing.T) {

log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore = storage.NewImageStore(dir, true, storage.DefaultGCDelay, true,
imgStore = local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
true, log, metrics, nil)
}

@ -712,7 +713,7 @@ func TestMandatoryAnnotations(t *testing.T) {
} else {
tdir = t.TempDir()

imgStore = storage.NewImageStore(tdir, true, storage.DefaultGCDelay, true,
imgStore = local.NewImageStore(tdir, true, storage.DefaultGCDelay, true,
true, log, metrics, &mocks.MockedLint{
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storage.ImageStore) (bool, error) {
return false, nil
@ -772,7 +773,7 @@ func TestMandatoryAnnotations(t *testing.T) {
},
}, store)
} else {
imgStore = storage.NewImageStore(tdir, true, storage.DefaultGCDelay, true,
imgStore = local.NewImageStore(tdir, true, storage.DefaultGCDelay, true,
true, log, metrics, &mocks.MockedLint{
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storage.ImageStore) (bool, error) {
// nolint: goerr113
@ -828,13 +829,13 @@ func TestStorageHandler(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)

// Create ImageStore
firstStore = storage.NewImageStore(firstRootDir, false, storage.DefaultGCDelay,
firstStore = local.NewImageStore(firstRootDir, false, storage.DefaultGCDelay,
false, false, log, metrics, nil)

secondStore = storage.NewImageStore(secondRootDir, false,
secondStore = local.NewImageStore(secondRootDir, false,
storage.DefaultGCDelay, false, false, log, metrics, nil)

thirdStore = storage.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay,
thirdStore = local.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay,
false, false, log, metrics, nil)
}
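The TestMandatoryAnnotations hunks above wire a mocks.MockedLint into the store so every manifest is rejected. A hedged sketch of that wiring in isolation, under the same assumptions as the earlier constructor example (parameter meanings inferred from the call sites); newStoreWithRejectingLinter is a name invented for this sketch.

package example

import (
	godigest "github.com/opencontainers/go-digest"

	"zotregistry.io/zot/pkg/extensions/monitoring"
	zlog "zotregistry.io/zot/pkg/log"
	"zotregistry.io/zot/pkg/storage"
	"zotregistry.io/zot/pkg/storage/local"
	"zotregistry.io/zot/pkg/test/mocks"
)

// newStoreWithRejectingLinter mirrors the test setup above: a MockedLint whose
// LintFn vetoes every manifest, so pushing an image is expected to fail the
// lint check performed during PutImageManifest.
func newStoreWithRejectingLinter(dir string, log zlog.Logger) storage.ImageStore {
	metrics := monitoring.NewMetricsServer(false, log)

	linter := &mocks.MockedLint{
		LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storage.ImageStore) (bool, error) {
			return false, nil // reject every image
		},
	}

	return local.NewImageStore(dir, true, storage.DefaultGCDelay,
		true, true, log, metrics, linter)
}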