refactor(digests): standardise representation of digests to digest.Digest (#898)

Digests were represented in several different ways; we needed a uniform type to represent digests and to enforce a consistent format. This change also replaces usage of github.com/google/go-containerregistry/pkg/v1 with github.com/opencontainers/image-spec/specs-go/v1.

Signed-off-by: Laurentiu Niculae <niculae.laurentiu1@gmail.com>
(cherry picked from commit 96b2f29d6d57070a913ce419149cd481c0723815)
(cherry picked from commit 3d41b583daea654c98378ce3dcb78937d71538e8)
Co-authored-by: Laurentiu Niculae <niculae.laurentiu1@gmail.com>
parent 5f99f9a445
commit ac6c6a844c
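Before the hunks below, here is a minimal, self-contained sketch (not part of the commit itself) of the convention the refactor standardises on: digests travel through the code as godigest.Digest values rather than plain strings, are validated with Parse at the API boundary, and are converted explicitly with String() for headers/URLs and Encoded() for blob paths.

package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

func main() {
	// FromString always yields a valid sha256 digest value.
	dgst := godigest.FromString("hello blob")

	// String() keeps the algorithm prefix, suitable for HTTP headers and URLs.
	fmt.Println(dgst.String())

	// Encoded() is only the hex part, suitable for paths such as blobs/sha256/<hex>
	// (this is why dgst.Hex() calls in the tests become dgst.Encoded()).
	fmt.Println(dgst.Encoded())

	// Parse validates untrusted input (e.g. a request variable) before use.
	if _, err := godigest.Parse("not-a-digest"); err != nil {
		fmt.Println("rejected:", err)
	}
}

With this convention in place, helpers such as getBlobUploadLocation and the storage interfaces in the diff can take godigest.Digest parameters directly instead of bare strings.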
@@ -3294,12 +3294,12 @@ func TestCrossRepoMount(t *testing.T) {
baseURL, constants.RoutePrefix, constants.Blobs, godigest.SHA256, blob))
// Check os.SameFile here
- cachePath := path.Join(ctlr.Config.Storage.RootDirectory, "zot-d-test", "blobs/sha256", dgst.Hex())
+ cachePath := path.Join(ctlr.Config.Storage.RootDirectory, "zot-d-test", "blobs/sha256", dgst.Encoded())
cacheFi, err := os.Stat(cachePath)
So(err, ShouldBeNil)
- linkPath := path.Join(ctlr.Config.Storage.RootDirectory, "zot-mount-test", "blobs/sha256", dgst.Hex())
+ linkPath := path.Join(ctlr.Config.Storage.RootDirectory, "zot-mount-test", "blobs/sha256", dgst.Encoded())
linkFi, err := os.Stat(linkPath)
So(err, ShouldBeNil)
@@ -3318,7 +3318,7 @@ func TestCrossRepoMount(t *testing.T) {
So(test.Location(baseURL, postResponse), ShouldEqual, fmt.Sprintf("%s%s/zot-mount1-test/%s/%s:%s",
baseURL, constants.RoutePrefix, constants.Blobs, godigest.SHA256, blob))
- linkPath = path.Join(ctlr.Config.Storage.RootDirectory, "zot-mount1-test", "blobs/sha256", dgst.Hex())
+ linkPath = path.Join(ctlr.Config.Storage.RootDirectory, "zot-mount1-test", "blobs/sha256", dgst.Encoded())
linkFi, err = os.Stat(linkPath)
So(err, ShouldBeNil)
@@ -23,6 +23,7 @@ import (
jsoniter "github.com/json-iterator/go"
notreg "github.com/notaryproject/notation-go/registry"
"github.com/opencontainers/distribution-spec/specs-go/v1/extensions"
+ godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
@@ -326,7 +327,7 @@ func (rh *RouteHandler) CheckManifest(response http.ResponseWriter, request *htt
return
}
- response.Header().Set(constants.DistContentDigestKey, digest)
+ response.Header().Set(constants.DistContentDigestKey, digest.String())
response.Header().Set("Content-Length", fmt.Sprintf("%d", len(content)))
response.Header().Set("Content-Type", mediaType)
response.WriteHeader(http.StatusOK)
@@ -393,7 +394,7 @@ func (rh *RouteHandler) GetManifest(response http.ResponseWriter, request *http.
return
}
- response.Header().Set(constants.DistContentDigestKey, digest)
+ response.Header().Set(constants.DistContentDigestKey, digest.String())
WriteData(response, http.StatusOK, mediaType, content)
}
@@ -463,7 +464,7 @@ func (rh *RouteHandler) UpdateManifest(response http.ResponseWriter, request *ht
NewErrorList(NewError(MANIFEST_INVALID, map[string]string{"reference": reference})))
} else if errors.Is(err, zerr.ErrBlobNotFound) {
WriteJSON(response, http.StatusBadRequest,
- NewErrorList(NewError(BLOB_UNKNOWN, map[string]string{"blob": digest})))
+ NewErrorList(NewError(BLOB_UNKNOWN, map[string]string{"blob": digest.String()})))
} else if errors.Is(err, zerr.ErrRepoBadVersion) {
WriteJSON(response, http.StatusInternalServerError,
NewErrorList(NewError(INVALID_INDEX, map[string]string{"name": name})))
@@ -488,7 +489,7 @@ func (rh *RouteHandler) UpdateManifest(response http.ResponseWriter, request *ht
}
response.Header().Set("Location", fmt.Sprintf("/v2/%s/manifests/%s", name, digest))
- response.Header().Set(constants.DistContentDigestKey, digest)
+ response.Header().Set(constants.DistContentDigestKey, digest.String())
response.WriteHeader(http.StatusCreated)
}
@@ -564,23 +565,27 @@ func (rh *RouteHandler) CheckBlob(response http.ResponseWriter, request *http.Re
imgStore := rh.getImageStore(name)
- digest, ok := vars["digest"]
- if !ok || digest == "" {
+ digestStr, ok := vars["digest"]
+ if !ok || digestStr == "" {
response.WriteHeader(http.StatusNotFound)
return
}
+ digest := godigest.Digest(digestStr)
ok, blen, err := imgStore.CheckBlob(name, digest)
if err != nil {
if errors.Is(err, zerr.ErrBadBlobDigest) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
WriteJSON(response,
http.StatusBadRequest,
- NewErrorList(NewError(DIGEST_INVALID, map[string]string{"digest": digest})))
+ NewErrorList(NewError(DIGEST_INVALID, map[string]string{"digest": digest.String()})))
} else if errors.Is(err, zerr.ErrRepoNotFound) {
WriteJSON(response, http.StatusNotFound, NewErrorList(NewError(NAME_UNKNOWN, map[string]string{"name": name})))
} else if errors.Is(err, zerr.ErrBlobNotFound) {
- WriteJSON(response, http.StatusNotFound, NewErrorList(NewError(BLOB_UNKNOWN, map[string]string{"digest": digest})))
+ WriteJSON(response, http.StatusNotFound, NewErrorList(NewError(BLOB_UNKNOWN,
+ map[string]string{"digest": digest.String()})))
} else {
rh.c.Log.Error().Err(err).Msg("unexpected error")
response.WriteHeader(http.StatusInternalServerError)
@@ -590,14 +595,15 @@ func (rh *RouteHandler) CheckBlob(response http.ResponseWriter, request *http.Re
}
if !ok {
- WriteJSON(response, http.StatusNotFound, NewErrorList(NewError(BLOB_UNKNOWN, map[string]string{"digest": digest})))
+ WriteJSON(response, http.StatusNotFound, NewErrorList(NewError(BLOB_UNKNOWN,
+ map[string]string{"digest": digest.String()})))
return
}
response.Header().Set("Content-Length", fmt.Sprintf("%d", blen))
response.Header().Set("Accept-Ranges", "bytes")
- response.Header().Set(constants.DistContentDigestKey, digest)
+ response.Header().Set(constants.DistContentDigestKey, digest.String())
response.WriteHeader(http.StatusOK)
}
@@ -669,22 +675,25 @@ func (rh *RouteHandler) GetBlob(response http.ResponseWriter, request *http.Requ
imgStore := rh.getImageStore(name)
- digest, ok := vars["digest"]
- if !ok || digest == "" {
+ digestStr, ok := vars["digest"]
+ if !ok || digestStr == "" {
response.WriteHeader(http.StatusNotFound)
return
}
- mediaType := request.Header.Get("Accept")
+ digest := godigest.Digest(digestStr)
- var err error
+ mediaType := request.Header.Get("Accept")
/* content range is supported for resumbale pulls */
partial := false
var from, to int64
+ var err error
contentRange := request.Header.Get("Range")
_, ok = request.Header["Range"]
@@ -719,7 +728,7 @@ func (rh *RouteHandler) GetBlob(response http.ResponseWriter, request *http.Requ
if errors.Is(err, zerr.ErrBadBlobDigest) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
WriteJSON(response,
http.StatusBadRequest,
- NewErrorList(NewError(DIGEST_INVALID, map[string]string{"digest": digest})))
+ NewErrorList(NewError(DIGEST_INVALID, map[string]string{"digest": digest.String()})))
} else if errors.Is(err, zerr.ErrRepoNotFound) {
WriteJSON(response,
http.StatusNotFound,
@@ -727,7 +736,7 @@ func (rh *RouteHandler) GetBlob(response http.ResponseWriter, request *http.Requ
} else if errors.Is(err, zerr.ErrBlobNotFound) {
WriteJSON(response,
http.StatusNotFound,
- NewErrorList(NewError(BLOB_UNKNOWN, map[string]string{"digest": digest})))
+ NewErrorList(NewError(BLOB_UNKNOWN, map[string]string{"digest": digest.String()})))
} else {
rh.c.Log.Error().Err(err).Msg("unexpected error")
response.WriteHeader(http.StatusInternalServerError)
@@ -746,7 +755,7 @@ func (rh *RouteHandler) GetBlob(response http.ResponseWriter, request *http.Requ
response.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", from, from+blen-1, bsize))
} else {
- response.Header().Set(constants.DistContentDigestKey, digest)
+ response.Header().Set(constants.DistContentDigestKey, digest.String())
}
// return the blob data
@@ -772,8 +781,10 @@ func (rh *RouteHandler) DeleteBlob(response http.ResponseWriter, request *http.R
return
}
- digest, ok := vars["digest"]
- if !ok || digest == "" {
+ digestStr, ok := vars["digest"]
+ digest, err := godigest.Parse(digestStr)
+ if !ok || digestStr == "" || err != nil {
response.WriteHeader(http.StatusNotFound)
return
@@ -781,12 +792,12 @@ func (rh *RouteHandler) DeleteBlob(response http.ResponseWriter, request *http.R
imgStore := rh.getImageStore(name)
- err := imgStore.DeleteBlob(name, digest)
+ err = imgStore.DeleteBlob(name, digest)
if err != nil {
if errors.Is(err, zerr.ErrBadBlobDigest) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
WriteJSON(response,
http.StatusBadRequest,
- NewErrorList(NewError(DIGEST_INVALID, map[string]string{"digest": digest})))
+ NewErrorList(NewError(DIGEST_INVALID, map[string]string{"digest": digest.String()})))
} else if errors.Is(err, zerr.ErrRepoNotFound) {
WriteJSON(response,
http.StatusNotFound,
@@ -794,7 +805,7 @@ func (rh *RouteHandler) DeleteBlob(response http.ResponseWriter, request *http.R
} else if errors.Is(err, zerr.ErrBlobNotFound) {
WriteJSON(response,
http.StatusNotFound,
- NewErrorList(NewError(BLOB_UNKNOWN, map[string]string{"digest": digest})))
+ NewErrorList(NewError(BLOB_UNKNOWN, map[string]string{".String()": digest.String()})))
} else {
rh.c.Log.Error().Err(err).Msg("unexpected error")
response.WriteHeader(http.StatusInternalServerError)
@@ -838,10 +849,11 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request *
return
}
+ mountDigest := godigest.Digest(mountDigests[0])
// zot does not support cross mounting directly and do a workaround creating using hard link.
// check blob looks for actual path (name+mountDigests[0]) first then look for cache and
// if found in cache, will do hard link and if fails we will start new upload.
- _, _, err := imgStore.CheckBlob(name, mountDigests[0])
+ _, _, err := imgStore.CheckBlob(name, mountDigest)
if err != nil {
upload, err := imgStore.NewBlobUpload(name)
if err != nil {
@@ -862,7 +874,7 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request *
return
}
- response.Header().Set("Location", getBlobUploadLocation(request.URL, name, mountDigests[0]))
+ response.Header().Set("Location", getBlobUploadLocation(request.URL, name, mountDigest))
response.WriteHeader(http.StatusCreated)
return
@@ -883,8 +895,6 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request *
return
}
- digest := digests[0]
if contentType := request.Header.Get("Content-Type"); contentType != constants.BinaryMediaType {
rh.c.Log.Warn().Str("actual", contentType).Str("expected", constants.BinaryMediaType).Msg("invalid media type")
response.WriteHeader(http.StatusUnsupportedMediaType)
@@ -894,15 +904,17 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request *
rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG")
+ digestStr := digests[0]
+ digest := godigest.Digest(digestStr)
- var contentLength int64
- var err error
- contentLength, err = strconv.ParseInt(request.Header.Get("Content-Length"), 10, 64)
+ contentLength, err := strconv.ParseInt(request.Header.Get("Content-Length"), 10, 64)
if err != nil || contentLength <= 0 {
rh.c.Log.Warn().Str("actual", request.Header.Get("Content-Length")).Msg("invalid content length")
WriteJSON(response, http.StatusBadRequest,
- NewErrorList(NewError(BLOB_UPLOAD_INVALID, map[string]string{"digest": digest})))
+ NewErrorList(NewError(BLOB_UPLOAD_INVALID, map[string]string{"digest": digest.String()})))
return
}
@@ -1139,7 +1151,12 @@ func (rh *RouteHandler) UpdateBlobUpload(response http.ResponseWriter, request *
return
}
- digest := digests[0]
+ digest, err := godigest.Parse(digests[0])
+ if err != nil {
+ response.WriteHeader(http.StatusBadRequest)
+ return
+ }
rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG")
@@ -1212,7 +1229,7 @@ finish:
if err := imgStore.FinishBlobUpload(name, sessionID, request.Body, digest); err != nil {
if errors.Is(err, zerr.ErrBadBlobDigest) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
WriteJSON(response, http.StatusBadRequest,
- NewErrorList(NewError(DIGEST_INVALID, map[string]string{"digest": digest})))
+ NewErrorList(NewError(DIGEST_INVALID, map[string]string{"digest": digest.String()})))
} else if errors.Is(err, zerr.ErrBadUploadRange) {
WriteJSON(response, http.StatusBadRequest,
NewErrorList(NewError(BLOB_UPLOAD_INVALID, map[string]string{"session_id": sessionID})))
@@ -1237,7 +1254,7 @@ finish:
response.Header().Set("Location", getBlobUploadLocation(request.URL, name, digest))
response.Header().Set("Content-Length", "0")
- response.Header().Set(constants.DistContentDigestKey, digest)
+ response.Header().Set(constants.DistContentDigestKey, digest.String())
response.WriteHeader(http.StatusCreated)
}
@@ -1443,7 +1460,7 @@ func (rh *RouteHandler) getImageStore(name string) storage.ImageStore {
// will sync on demand if an image is not found, in case sync extensions is enabled.
func getImageManifest(routeHandler *RouteHandler, imgStore storage.ImageStore, name,
reference string,
- ) ([]byte, string, string, error) {
+ ) ([]byte, godigest.Digest, string, error) {
content, digest, mediaType, err := imgStore.GetImageManifest(name, reference)
if err != nil {
if errors.Is(err, zerr.ErrRepoNotFound) || errors.Is(err, zerr.ErrManifestNotFound) {
@@ -1471,7 +1488,7 @@ func getImageManifest(routeHandler *RouteHandler, imgStore storage.ImageStore, n
}
// will sync referrers on demand if they are not found, in case sync extensions is enabled.
- func getReferrers(routeHandler *RouteHandler, imgStore storage.ImageStore, name, digest,
+ func getReferrers(routeHandler *RouteHandler, imgStore storage.ImageStore, name string, digest godigest.Digest,
artifactType string,
) ([]artifactspec.Descriptor, error) {
refs, err := imgStore.GetReferrers(name, digest, artifactType)
@@ -1480,12 +1497,12 @@ func getReferrers(routeHandler *RouteHandler, imgStore storage.ImageStore, name,
routeHandler.c.Config.Extensions.Sync != nil &&
*routeHandler.c.Config.Extensions.Sync.Enable {
routeHandler.c.Log.Info().Msgf("signature not found, trying to get signature %s:%s by syncing on demand",
- name, digest)
+ name, digest.String())
errSync := ext.SyncOneImage(routeHandler.c.Config, routeHandler.c.StoreController,
- name, digest, true, routeHandler.c.Log)
+ name, digest.String(), true, routeHandler.c.Log)
if errSync != nil {
- routeHandler.c.Log.Error().Err(err).Str("name", name).Str("digest", digest).Msg("unable to get references")
+ routeHandler.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references")
return []artifactspec.Descriptor{}, err
}
@@ -1523,8 +1540,10 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http
return
}
- digest, ok := vars["digest"]
- if !ok || digest == "" {
+ digestStr, ok := vars["digest"]
+ digest, err := godigest.Parse(digestStr)
+ if !ok || digestStr == "" || err != nil {
response.WriteHeader(http.StatusBadRequest)
return
@@ -1549,11 +1568,11 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http
imgStore := rh.getImageStore(name)
- rh.c.Log.Info().Str("digest", digest).Str("artifactType", artifactType).Msg("getting manifest")
+ rh.c.Log.Info().Str("digest", digest.String()).Str("artifactType", artifactType).Msg("getting manifest")
refs, err := getReferrers(rh, imgStore, name, digest, artifactType) //nolint:contextcheck
if err != nil {
- rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest).Msg("unable to get references")
+ rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references")
response.WriteHeader(http.StatusBadRequest)
return
@@ -1578,7 +1597,7 @@ func getBlobUploadSessionLocation(url *url.URL, sessionID string) string {
// GetBlobUploadLocation returns actual blob location on registry
// e.g /v2/<name>/blobs/<digest>.
- func getBlobUploadLocation(url *url.URL, name, digest string) string {
+ func getBlobUploadLocation(url *url.URL, name string, digest godigest.Digest) string {
url.RawQuery = ""
// we are relying on request URL to set location and
@@ -1586,7 +1605,7 @@ func getBlobUploadLocation(url *url.URL, name, digest string) string {
// getBlobUploadLocation will be called only when blob upload is completed and
// location should be set as blob url <v2/<name>/blobs/<digest>>.
if strings.Contains(url.Path, "uploads") {
- url.Path = path.Join(constants.RoutePrefix, name, constants.Blobs, digest)
+ url.Path = path.Join(constants.RoutePrefix, name, constants.Blobs, digest.String())
}
return url.String()
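One pattern worth noting from the handlers above: some paths cast the request variable directly with godigest.Digest(digestStr) and let the storage layer validate it, while others (DeleteBlob, GetReferrers, UpdateBlobUpload) call godigest.Parse and reject malformed input immediately. A small illustrative sketch of the difference follows; the toDigest helper is hypothetical and not part of zot.

package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

// toDigest converts an untrusted string either eagerly (strict, via Parse)
// or lazily (plain cast, validated later by Validate or by the store).
func toDigest(raw string, strict bool) (godigest.Digest, error) {
	if strict {
		return godigest.Parse(raw) // returns an error for malformed digests
	}

	return godigest.Digest(raw), nil // plain cast, no validation yet
}

func main() {
	// Strict path: the bad hex length is rejected up front.
	d, err := toDigest("sha256:deadbeef", true)
	fmt.Println(d, err)

	// Lazy path: the same problem only surfaces when Validate is called.
	d, _ = toDigest("sha256:deadbeef", false)
	fmt.Println(d.Validate())
}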
@@ -13,6 +13,7 @@ import (
"testing"
"github.com/gorilla/mux"
+ godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
. "github.com/smartystreets/goconvey/convey"
@@ -56,7 +57,7 @@ func TestRoutes(t *testing.T) {
Convey("Get manifest", func() {
// overwrite controller storage
ctlr.StoreController.DefaultStore = &mocks.MockedImageStore{
- GetImageManifestFn: func(repo string, reference string) ([]byte, string, string, error) {
+ GetImageManifestFn: func(repo string, reference string) ([]byte, godigest.Digest, string, error) {
return []byte{}, "", "", zerr.ErrRepoBadVersion
},
}
@@ -100,7 +101,7 @@ func TestRoutes(t *testing.T) {
"reference": "reference",
},
&mocks.MockedImageStore{
- PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (string, error) {
+ PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) {
return "", zerr.ErrRepoNotFound
},
})
@@ -113,7 +114,7 @@ func TestRoutes(t *testing.T) {
},
&mocks.MockedImageStore{
- PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (string, error) {
+ PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) {
return "", zerr.ErrManifestNotFound
},
})
@@ -125,7 +126,7 @@ func TestRoutes(t *testing.T) {
"reference": "reference",
},
&mocks.MockedImageStore{
- PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (string, error) {
+ PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) {
return "", zerr.ErrBadManifest
},
})
@@ -137,7 +138,7 @@ func TestRoutes(t *testing.T) {
"reference": "reference",
},
&mocks.MockedImageStore{
- PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (string, error) {
+ PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) {
return "", zerr.ErrBlobNotFound
},
})
@@ -150,7 +151,7 @@ func TestRoutes(t *testing.T) {
"reference": "reference",
},
&mocks.MockedImageStore{
- PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (string, error) {
+ PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) {
return "", zerr.ErrRepoBadVersion
},
})
@@ -258,7 +259,7 @@ func TestRoutes(t *testing.T) {
"digest": test.GetTestBlobDigest("zot-cve-test", "layer").String(),
},
&mocks.MockedImageStore{
- DeleteBlobFn: func(repo, digest string) error {
+ DeleteBlobFn: func(repo string, digest godigest.Digest) error {
return ErrUnexpectedError
},
})
@@ -270,7 +271,7 @@ func TestRoutes(t *testing.T) {
"digest": "sha256:7b8437f04f83f084b7ed68ad8c4a4947e12fc4e1b006b38129bac89114ec3621",
},
&mocks.MockedImageStore{
- DeleteBlobFn: func(repo, digest string) error {
+ DeleteBlobFn: func(repo string, digest godigest.Digest) error {
return zerr.ErrBadBlobDigest
},
})
@@ -283,7 +284,7 @@ func TestRoutes(t *testing.T) {
"digest": test.GetTestBlobDigest("zot-cve-test", "layer").String(),
},
&mocks.MockedImageStore{
- DeleteBlobFn: func(repo, digest string) error {
+ DeleteBlobFn: func(repo string, digest godigest.Digest) error {
return zerr.ErrBlobNotFound
},
})
@@ -296,7 +297,7 @@ func TestRoutes(t *testing.T) {
"digest": test.GetTestBlobDigest("zot-cve-test", "layer").String(),
},
&mocks.MockedImageStore{
- DeleteBlobFn: func(repo, digest string) error {
+ DeleteBlobFn: func(repo string, digest godigest.Digest) error {
return zerr.ErrRepoNotFound
},
})
@@ -326,7 +327,7 @@ func TestRoutes(t *testing.T) {
"digest": "1234",
},
&mocks.MockedImageStore{
- CheckBlobFn: func(repo, digest string) (bool, int64, error) {
+ CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
return true, 0, zerr.ErrBadBlobDigest
},
})
@@ -339,7 +340,7 @@ func TestRoutes(t *testing.T) {
"digest": "1234",
},
&mocks.MockedImageStore{
- CheckBlobFn: func(repo, digest string) (bool, int64, error) {
+ CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
return true, 0, zerr.ErrRepoNotFound
},
})
@@ -352,7 +353,7 @@ func TestRoutes(t *testing.T) {
"digest": "1234",
},
&mocks.MockedImageStore{
- CheckBlobFn: func(repo, digest string) (bool, int64, error) {
+ CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
return true, 0, zerr.ErrBlobNotFound
},
})
@@ -365,7 +366,7 @@ func TestRoutes(t *testing.T) {
"digest": "1234",
},
&mocks.MockedImageStore{
- CheckBlobFn: func(repo, digest string) (bool, int64, error) {
+ CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
return true, 0, ErrUnexpectedError
},
})
@@ -378,7 +379,7 @@ func TestRoutes(t *testing.T) {
"digest": "1234",
},
&mocks.MockedImageStore{
- CheckBlobFn: func(repo, digest string) (bool, int64, error) {
+ CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
return false, 0, nil
},
})
@@ -406,7 +407,7 @@ func TestRoutes(t *testing.T) {
"digest": test.GetTestBlobDigest("zot-cve-test", "layer").String(),
},
&mocks.MockedImageStore{
- GetBlobFn: func(repo, digest, mediaType string) (io.ReadCloser, int64, error) {
+ GetBlobFn: func(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error) {
return io.NopCloser(bytes.NewBuffer([]byte(""))), 0, zerr.ErrRepoNotFound
},
})
@@ -419,7 +420,7 @@ func TestRoutes(t *testing.T) {
"digest": test.GetTestBlobDigest("zot-cve-test", "layer").String(),
},
&mocks.MockedImageStore{
- GetBlobFn: func(repo, digest, mediaType string) (io.ReadCloser, int64, error) {
+ GetBlobFn: func(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error) {
return io.NopCloser(bytes.NewBuffer([]byte(""))), 0, zerr.ErrBadBlobDigest
},
})
@@ -470,7 +471,7 @@ func TestRoutes(t *testing.T) {
NewBlobUploadFn: func(repo string) (string, error) {
return "", zerr.ErrRepoNotFound
},
- CheckBlobFn: func(repo, digest string) (bool, int64, error) {
+ CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
return true, 0, zerr.ErrRepoNotFound
},
})
@@ -487,7 +488,7 @@ func TestRoutes(t *testing.T) {
NewBlobUploadFn: func(repo string) (string, error) {
return "", zerr.ErrRepoNotFound
},
- CheckBlobFn: func(repo, digest string) (bool, int64, error) {
+ CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
return true, 0, zerr.ErrRepoNotFound
},
})
@@ -505,7 +506,7 @@ func TestRoutes(t *testing.T) {
NewBlobUploadFn: func(repo string) (string, error) {
return "", zerr.ErrRepoNotFound
},
- CheckBlobFn: func(repo, digest string) (bool, int64, error) {
+ CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
return true, 0, zerr.ErrRepoNotFound
},
})
@@ -521,7 +522,7 @@ func TestRoutes(t *testing.T) {
"Content-Length": "100",
},
&mocks.MockedImageStore{
- FullBlobUploadFn: func(repo string, body io.Reader, digest string) (string, int64, error) {
+ FullBlobUploadFn: func(repo string, body io.Reader, digest godigest.Digest) (string, int64, error) {
return "session", 0, zerr.ErrBadBlobDigest
},
})
@@ -537,7 +538,7 @@ func TestRoutes(t *testing.T) {
"Content-Length": "100",
},
&mocks.MockedImageStore{
- FullBlobUploadFn: func(repo string, body io.Reader, digest string) (string, int64, error) {
+ FullBlobUploadFn: func(repo string, body io.Reader, digest godigest.Digest) (string, int64, error) {
return "session", 20, nil
},
})
@@ -965,7 +966,7 @@ func TestRoutes(t *testing.T) {
"session_id": "test",
},
&mocks.MockedImageStore{
- FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest string) error {
+ FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest godigest.Digest) error {
return zerr.ErrBadBlobDigest
},
},
@@ -985,7 +986,7 @@ func TestRoutes(t *testing.T) {
"session_id": "test",
},
&mocks.MockedImageStore{
- FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest string) error {
+ FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest godigest.Digest) error {
return zerr.ErrBadUploadRange
},
},
@@ -1005,7 +1006,7 @@ func TestRoutes(t *testing.T) {
"session_id": "test",
},
&mocks.MockedImageStore{
- FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest string) error {
+ FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest godigest.Digest) error {
return zerr.ErrRepoNotFound
},
},
@@ -1025,7 +1026,7 @@ func TestRoutes(t *testing.T) {
"session_id": "test",
},
&mocks.MockedImageStore{
- FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest string) error {
+ FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest godigest.Digest) error {
return zerr.ErrUploadNotFound
},
},
@@ -1045,7 +1046,7 @@ func TestRoutes(t *testing.T) {
"session_id": "test",
},
&mocks.MockedImageStore{
- FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest string) error {
+ FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest godigest.Digest) error {
return ErrUnexpectedError
},
DeleteBlobUploadFn: func(repo, uuid string) error {
@@ -1319,7 +1320,7 @@ func TestRoutes(t *testing.T) {
"session_id": "test",
},
&mocks.MockedImageStore{
- FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest string) error {
+ FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest godigest.Digest) error {
return zerr.ErrUploadNotFound
},
},
@@ -1339,7 +1340,7 @@ func TestRoutes(t *testing.T) {
"session_id": "test",
},
&mocks.MockedImageStore{
- FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest string) error {
+ FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest godigest.Digest) error {
return zerr.ErrUploadNotFound
},
},
@@ -1359,7 +1360,7 @@ func TestRoutes(t *testing.T) {
"session_id": "test",
},
&mocks.MockedImageStore{
- FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest string) error {
+ FinishBlobUploadFn: func(repo, uuid string, body io.Reader, digest godigest.Digest) error {
return zerr.ErrUploadNotFound
},
},
@@ -17,7 +17,6 @@ import (
"os"
"path/filepath"
"strconv"
- "strings"
"sync"
"time"
@@ -272,11 +271,8 @@ func (p *requestsPool) doJob(ctx context.Context, job *manifestJob) {
p.outputCh <- stringResult{"", err}
}
- digest := header.Get("docker-content-digest")
- digest = strings.TrimPrefix(digest, "sha256:")
+ digestStr := header.Get("docker-content-digest")
configDigest := job.manifestResp.Config.Digest
- configDigest = strings.TrimPrefix(configDigest, "sha256:")
var size uint64
@@ -289,7 +285,7 @@ func (p *requestsPool) doJob(ctx context.Context, job *manifestJob) {
layers,
layer{
Size: entry.Size,
- Digest: strings.TrimPrefix(entry.Digest, "sha256:"),
+ Digest: entry.Digest,
},
)
}
@@ -298,7 +294,7 @@ func (p *requestsPool) doJob(ctx context.Context, job *manifestJob) {
image.verbose = *job.config.verbose
image.RepoName = job.imageName
image.Tag = job.tagName
- image.Digest = digest
+ image.Digest = digestStr
image.Size = strconv.Itoa(int(size))
image.ConfigDigest = configDigest
image.Layers = layers
@@ -183,7 +183,7 @@ func TestSearchCVECmd(t *testing.T) {
So(err, ShouldBeNil)
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag 6e2f80bf false 123kB")
Convey("using shorthand", func() {
args := []string{"cvetest", "-I", "dummyImageName", "--cve-id", "aCVEID", "--url", "someURL"}
buff := bytes.NewBufferString("")
@@ -197,7 +197,7 @@ func TestSearchCVECmd(t *testing.T) {
So(err, ShouldBeNil)
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag 6e2f80bf false 123kB")
})
})
@@ -281,7 +281,7 @@ func TestSearchCVECmd(t *testing.T) {
err := cveCmd.Execute()
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE anImage tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE anImage tag 6e2f80bf false 123kB")
So(err, ShouldBeNil)
Convey("invalid CVE ID", func() {
@@ -326,7 +326,7 @@ func TestSearchCVECmd(t *testing.T) {
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
So(err, ShouldBeNil)
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE fixedImage tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE fixedImage tag 6e2f80bf false 123kB")
Convey("invalid image name", func() {
args := []string{"cvetest", "--cve-id", "aCVEID", "--image", "invalidImageName"}
@@ -187,7 +187,7 @@ func TestSearchImageCmd(t *testing.T) {
err := cmd.Execute()
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag 6e2f80bf false 123kB")
So(err, ShouldBeNil)
})
@@ -203,7 +203,7 @@ func TestSearchImageCmd(t *testing.T) {
err := imageCmd.Execute()
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag 6e2f80bf false 123kB")
So(err, ShouldBeNil)
Convey("using shorthand", func() {
args := []string{"imagetest", "-n", "dummyImageName", "--url", "someUrlImage"}
@@ -218,13 +218,13 @@ func TestSearchImageCmd(t *testing.T) {
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag 6e2f80bf false 123kB")
So(err, ShouldBeNil)
})
})
Convey("Test image by digest", t, func() {
- args := []string{"imagetest", "--digest", "DigestsA", "--url", "someUrlImage"}
+ args := []string{"imagetest", "--digest", "6e2f80bf", "--url", "someUrlImage"}
configPath := makeConfigFile(`{"configs":[{"_name":"imagetest","showspinner":false}]}`)
defer os.Remove(configPath)
imageCmd := NewImageCommand(new(mockService))
@@ -235,7 +235,7 @@ func TestSearchImageCmd(t *testing.T) {
err := imageCmd.Execute()
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE anImage tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE anImage tag 6e2f80bf false 123kB")
So(err, ShouldBeNil)
Convey("invalid URL format", func() {
@@ -385,42 +385,47 @@ func TestSignature(t *testing.T) {
//nolint:dupl
func TestDerivedImageList(t *testing.T) {
- port := test.GetFreePort()
- url := test.GetBaseURL(port)
- conf := config.New()
- conf.HTTP.Port = port
- defaultVal := true
- conf.Extensions = &extconf.ExtensionConfig{
- Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}},
- }
- ctlr := api.NewController(conf)
- ctlr.Config.Storage.RootDirectory = t.TempDir()
- go func(controller *api.Controller) {
- // this blocks
- if err := controller.Run(context.Background()); err != nil {
- return
- }
- }(ctlr)
- // wait till ready
- for {
- _, err := resty.R().Get(url)
- if err == nil {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- defer func(controller *api.Controller) {
- ctx := context.Background()
- _ = controller.Server.Shutdown(ctx)
- }(ctlr)
- err := uploadManifest(url)
- if err != nil {
- panic(err)
- }
- t.Logf("rootDir: %s", ctlr.Config.Storage.RootDirectory)
Convey("Test from real server", t, func() {
+ port := test.GetFreePort()
+ url := test.GetBaseURL(port)
+ conf := config.New()
+ conf.HTTP.Port = port
+ defaultVal := true
+ conf.Extensions = &extconf.ExtensionConfig{
+ Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}},
+ }
+ ctlr := api.NewController(conf)
+ ctlr.Config.Storage.RootDirectory = t.TempDir()
+ go func(controller *api.Controller) {
+ // this blocks
+ if err := controller.Run(context.Background()); err != nil {
+ return
+ }
+ }(ctlr)
+ // wait till ready
+ for {
+ _, err := resty.R().Get(url)
+ if err == nil {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ defer func(controller *api.Controller) {
+ ctx := context.Background()
+ _ = controller.Server.Shutdown(ctx)
+ }(ctlr)
+ err := uploadManifest(url)
+ So(err, ShouldBeNil)
+ t.Logf("rootDir: %s", ctlr.Config.Storage.RootDirectory)
Convey("Test derived images list working", func() {
t.Logf("%s", ctlr.Config.Storage.RootDirectory)
args := []string{"imagetest", "--derived-images", "repo7:test:1.0"}
@@ -479,42 +484,47 @@ func TestDerivedImageList(t *testing.T) {
//nolint:dupl
func TestBaseImageList(t *testing.T) {
- port := test.GetFreePort()
- url := test.GetBaseURL(port)
- conf := config.New()
- conf.HTTP.Port = port
- defaultVal := true
- conf.Extensions = &extconf.ExtensionConfig{
- Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}},
- }
- ctlr := api.NewController(conf)
- ctlr.Config.Storage.RootDirectory = t.TempDir()
- go func(controller *api.Controller) {
- // this blocks
- if err := controller.Run(context.Background()); err != nil {
- return
- }
- }(ctlr)
- // wait till ready
- for {
- _, err := resty.R().Get(url)
- if err == nil {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- defer func(controller *api.Controller) {
- ctx := context.Background()
- _ = controller.Server.Shutdown(ctx)
- }(ctlr)
- err := uploadManifest(url)
- if err != nil {
- panic(err)
- }
- t.Logf("rootDir: %s", ctlr.Config.Storage.RootDirectory)
Convey("Test from real server", t, func() {
+ port := test.GetFreePort()
+ url := test.GetBaseURL(port)
+ conf := config.New()
+ conf.HTTP.Port = port
+ defaultVal := true
+ conf.Extensions = &extconf.ExtensionConfig{
+ Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}},
+ }
+ ctlr := api.NewController(conf)
+ ctlr.Config.Storage.RootDirectory = t.TempDir()
+ go func(controller *api.Controller) {
+ // this blocks
+ if err := controller.Run(context.Background()); err != nil {
+ return
+ }
+ }(ctlr)
+ // wait till ready
+ for {
+ _, err := resty.R().Get(url)
+ if err == nil {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ defer func(controller *api.Controller) {
+ ctx := context.Background()
+ _ = controller.Server.Shutdown(ctx)
+ }(ctlr)
+ err := uploadManifest(url)
+ So(err, ShouldBeNil)
+ t.Logf("rootDir: %s", ctlr.Config.Storage.RootDirectory)
Convey("Test base images list working", func() {
t.Logf("%s", ctlr.Config.Storage.RootDirectory)
args := []string{"imagetest", "--base-images", "repo7:test:1.0"}
@@ -727,7 +737,7 @@ func TestOutputFormat(t *testing.T) {
err := cmd.Execute()
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
- So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag DigestsA false 123kB")
+ So(strings.TrimSpace(str), ShouldEqual, "IMAGE NAME TAG DIGEST SIGNED SIZE dummyImageName tag 6e2f80bf false 123kB")
So(err, ShouldBeNil)
})
@@ -746,7 +756,11 @@ func TestOutputFormat(t *testing.T) {
space := regexp.MustCompile(`\s+`)
str := space.ReplaceAllString(buff.String(), " ")
So(strings.TrimSpace(str), ShouldEqual, `{ "repoName": "dummyImageName", "tag": "tag", `+
- `"configDigest": "", "digest": "DigestsAreReallyLong", "layers": null, "size": "123445", "isSigned": false }`)
+ `"configDigest": "sha256:4c10985c40365538426f2ba8cf0c21384a7769be502a550dcc0601b3736625e0", `+
+ `"digest": "sha256:6e2f80bf9cfaabad474fbaf8ad68fdb652f776ea80b63492ecca404e5f6446a6", `+
+ `"layers": [ { "size": "0", `+
+ `"digest": "sha256:c122a146f0d02349be211bb95cc2530f4a5793f96edbdfa00860f741e5d8c0e6" } ], `+
+ `"size": "123445", "isSigned": false }`)
So(err, ShouldBeNil)
})
@@ -765,9 +779,11 @@ func TestOutputFormat(t *testing.T) {
So(
strings.TrimSpace(str),
ShouldEqual,
- `reponame: dummyImageName tag: tag configdigest: "" `+
- `digest: DigestsAreReallyLong layers: [] size: "123445" `+
- `issigned: false`,
+ `reponame: dummyImageName tag: tag `+
+ `configdigest: sha256:4c10985c40365538426f2ba8cf0c21384a7769be502a550dcc0601b3736625e0 `+
+ `digest: sha256:6e2f80bf9cfaabad474fbaf8ad68fdb652f776ea80b63492ecca404e5f6446a6 `+
+ `layers: - size: 0 digest: sha256:c122a146f0d02349be211bb95cc2530f4a5793f96edbdfa00860f741e5d8c0e6 `+
+ `size: "123445" issigned: false`,
)
So(err, ShouldBeNil)
@@ -789,9 +805,11 @@ func TestOutputFormat(t *testing.T) {
So(
strings.TrimSpace(str),
ShouldEqual,
- `reponame: dummyImageName tag: tag configdigest: "" `+
- `digest: DigestsAreReallyLong layers: [] size: "123445" `+
- `issigned: false`,
+ `reponame: dummyImageName tag: tag `+
+ `configdigest: sha256:4c10985c40365538426f2ba8cf0c21384a7769be502a550dcc0601b3736625e0 `+
+ `digest: sha256:6e2f80bf9cfaabad474fbaf8ad68fdb652f776ea80b63492ecca404e5f6446a6 `+
+ `layers: - size: 0 digest: sha256:c122a146f0d02349be211bb95cc2530f4a5793f96edbdfa00860f741e5d8c0e6 `+
+ `size: "123445" issigned: false`,
)
So(err, ShouldBeNil)
})
@@ -1086,41 +1104,47 @@ func TestServerResponseGQL(t *testing.T) {
}
func TestServerResponse(t *testing.T) {
- port := test.GetFreePort()
- url := test.GetBaseURL(port)
- conf := config.New()
- conf.HTTP.Port = port
- defaultVal := true
- conf.Extensions = &extconf.ExtensionConfig{
- Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}},
- }
- ctlr := api.NewController(conf)
- ctlr.Config.Storage.RootDirectory = t.TempDir()
- go func(controller *api.Controller) {
- // this blocks
- if err := controller.Run(context.Background()); err != nil {
- return
- }
- }(ctlr)
- // wait till ready
- for {
- _, err := resty.R().Get(url)
- if err == nil {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- defer func(controller *api.Controller) {
- ctx := context.Background()
- _ = controller.Server.Shutdown(ctx)
- }(ctlr)
- err := uploadManifest(url)
- if err != nil {
- panic(err)
- }
- t.Logf("%s", ctlr.Config.Storage.RootDirectory)
Convey("Test from real server", t, func() {
+ port := test.GetFreePort()
+ url := test.GetBaseURL(port)
+ conf := config.New()
+ conf.HTTP.Port = port
+ defaultVal := true
+ conf.Extensions = &extconf.ExtensionConfig{
+ Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}},
+ }
+ ctlr := api.NewController(conf)
+ ctlr.Config.Storage.RootDirectory = t.TempDir()
+ go func(controller *api.Controller) {
+ // this blocks
+ if err := controller.Run(context.Background()); err != nil {
+ return
+ }
+ }(ctlr)
+ // wait till ready
+ for {
+ _, err := resty.R().Get(url)
+ if err == nil {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ defer func(controller *api.Controller) {
+ ctx := context.Background()
+ _ = controller.Server.Shutdown(ctx)
+ }(ctlr)
+ err := uploadManifest(url)
+ t.Logf("%s", ctlr.Config.Storage.RootDirectory)
+ So(err, ShouldBeNil)
Convey("Test all images", func() {
t.Logf("%s", ctlr.Config.Storage.RootDirectory)
args := []string{"imagetest"}
@@ -1555,10 +1579,12 @@ func (service mockService) getDerivedImageListGQL(ctx context.Context, config se
imageListGQLResponse := &imageListStructForDerivedImagesGQL{}
imageListGQLResponse.Data.ImageList = []imageStruct{
{
- RepoName: "dummyImageName",
- Tag: "tag",
- Digest: "DigestsAreReallyLong",
- Size: "123445",
+ RepoName: "dummyImageName",
+ Tag: "tag",
+ Digest: godigest.FromString("Digest").String(),
+ ConfigDigest: godigest.FromString("ConfigDigest").String(),
+ Size: "123445",
+ Layers: []layer{{Digest: godigest.FromString("LayerDigest").String()}},
},
}
@@ -1571,10 +1597,12 @@ func (service mockService) getBaseImageListGQL(ctx context.Context, config searc
imageListGQLResponse := &imageListStructForBaseImagesGQL{}
imageListGQLResponse.Data.ImageList = []imageStruct{
{
- RepoName: "dummyImageName",
- Tag: "tag",
- Digest: "DigestsAreReallyLong",
- Size: "123445",
+ RepoName: "dummyImageName",
+ Tag: "tag",
+ Digest: godigest.FromString("Digest").String(),
+ ConfigDigest: godigest.FromString("ConfigDigest").String(),
+ Size: "123445",
+ Layers: []layer{{Digest: godigest.FromString("LayerDigest").String()}},
},
}
@@ -1587,10 +1615,12 @@ func (service mockService) getImagesGQL(ctx context.Context, config searchConfig
imageListGQLResponse := &imageListStructGQL{}
imageListGQLResponse.Data.ImageList = []imageStruct{
{
- RepoName: "dummyImageName",
- Tag: "tag",
- Digest: "DigestsAreReallyLong",
- Size: "123445",
+ RepoName: "dummyImageName",
+ Tag: "tag",
+ Digest: godigest.FromString("Digest").String(),
+ ConfigDigest: godigest.FromString("ConfigDigest").String(),
+ Size: "123445",
+ Layers: []layer{{Digest: godigest.FromString("LayerDigest").String()}},
},
}
@@ -1603,10 +1633,12 @@ func (service mockService) getImagesByDigestGQL(ctx context.Context, config sear
imageListGQLResponse := &imageListStructForDigestGQL{}
imageListGQLResponse.Data.ImageList = []imageStruct{
{
- RepoName: "randomimageName",
- Tag: "tag",
- Digest: "DigestsAreReallyLong",
- Size: "123445",
+ RepoName: "randomimageName",
+ Tag: "tag",
+ Digest: godigest.FromString("Digest").String(),
+ ConfigDigest: godigest.FromString("ConfigDigest").String(),
+ Size: "123445",
+ Layers: []layer{{Digest: godigest.FromString("LayerDigest").String()}},
},
}
@@ -1700,8 +1732,10 @@ func (service mockService) getMockedImageByName(imageName string) imageStruct {
image := imageStruct{}
image.RepoName = imageName
image.Tag = "tag"
- image.Digest = "DigestsAreReallyLong"
+ image.Digest = godigest.FromString("Digest").String()
+ image.ConfigDigest = godigest.FromString("ConfigDigest").String()
image.Size = "123445"
+ image.Layers = []layer{{Digest: godigest.FromString("LayerDigest").String()}}
return image
}
@@ -1715,8 +1749,10 @@ func (service mockService) getAllImages(ctx context.Context, config searchConfig
image := &imageStruct{}
image.RepoName = "randomimageName"
image.Tag = "tag"
- image.Digest = "DigestsAreReallyLong"
+ image.Digest = godigest.FromString("Digest").String()
+ image.ConfigDigest = godigest.FromString("ConfigDigest").String()
image.Size = "123445"
+ image.Layers = []layer{{Digest: godigest.FromString("LayerDigest").String()}}
str, err := image.string(*config.outputFormat, len(image.RepoName), len(image.Tag))
if err != nil {
@@ -1737,8 +1773,10 @@ func (service mockService) getImageByName(ctx context.Context, config searchConf
image := &imageStruct{}
image.RepoName = imageName
image.Tag = "tag"
- image.Digest = "DigestsAreReallyLong"
+ image.Digest = godigest.FromString("Digest").String()
+ image.ConfigDigest = godigest.FromString("ConfigDigest").String()
image.Size = "123445"
+ image.Layers = []layer{{Digest: godigest.FromString("LayerDigest").String()}}
str, err := image.string(*config.outputFormat, len(image.RepoName), len(image.Tag))
if err != nil {
@@ -16,6 +16,7 @@ import (
"github.com/dustin/go-humanize"
jsoniter "github.com/json-iterator/go"
"github.com/olekukonko/tablewriter"
+ godigest "github.com/opencontainers/go-digest"
"gopkg.in/yaml.v2"
zotErrors "zotregistry.io/zot/errors"
@@ -74,6 +75,7 @@ func (service searchService) getDerivedImageListGQL(ctx context.Context, config
Tag,
Digest,
+ ConfigDigest,
Layers {Size Digest},
LastUpdated,
IsSigned,
Size
@@ -100,6 +102,7 @@ func (service searchService) getBaseImageListGQL(ctx context.Context, config sea
Tag,
Digest,
+ ConfigDigest,
Layers {Size Digest},
LastUpdated,
IsSigned,
Size
@@ -156,7 +159,7 @@ func (service searchService) getImagesByCveIDGQL(ctx context.Context, config sea
password, cveID string,
) (*imagesForCve, error) {
query := fmt.Sprintf(`{ImageListForCVE(id: "%s") {`+`
- RepoName Tag Digest Size}
+ RepoName Tag Digest ConfigDigest Layers {Size Digest} Size}
}`,
cveID)
result := &imagesForCve{}
@@ -193,7 +196,7 @@ func (service searchService) getTagsForCVEGQL(ctx context.Context, config search
username, password, imageName, cveID string,
) (*imagesForCve, error) {
query := fmt.Sprintf(`{ImageListForCVE(id: "%s") {`+`
- RepoName Tag Digest Size}
+ RepoName Tag Digest ConfigDigest Layers {Size Digest} Size}
}`,
cveID)
result := &imagesForCve{}
@@ -211,7 +214,7 @@ func (service searchService) getFixedTagsForCVEGQL(ctx context.Context, config s
username, password, imageName, cveID string,
) (*fixedTags, error) {
query := fmt.Sprintf(`{ImageListWithCVEFixed(id: "%s", image: "%s") {`+`
- RepoName Tag Digest Size}
+ RepoName Tag Digest ConfigDigest Layers {Size Digest} Size}
}`,
cveID, imageName)
@@ -334,7 +337,7 @@ func (service searchService) getImagesByCveID(ctx context.Context, config search
defer close(rch)
query := fmt.Sprintf(`{ImageListForCVE(id: "%s") {`+`
- RepoName Tag Digest Size}
+ RepoName Tag Digest ConfigDigest Layers {Size Digest} Size}
}`,
cvid)
result := &imagesForCve{}
@@ -551,7 +554,7 @@ func (service searchService) getFixedTagsForCVE(ctx context.Context, config sear
defer close(rch)
query := fmt.Sprintf(`{ImageListWithCVEFixed (id: "%s", image: "%s") {`+`
- RepoName Tag Digest Size}
+ RepoName Tag Digest ConfigDigest Layers {Size Digest} Size}
}`,
cvid, imageName)
result := &fixedTags{}
@@ -924,21 +927,32 @@ func (img imageStruct) stringPlainText(maxImgNameLen, maxTagLen int) (string, er
imageName = img.RepoName
tagName = img.Tag
- digest := ellipsize(img.Digest, digestWidth, "")
+ manifestDigest, err := godigest.Parse(img.Digest)
+ if err != nil {
+ return "", fmt.Errorf("error parsing manifest digest %s: %w", img.Digest, err)
+ }
+ configDigest, err := godigest.Parse(img.ConfigDigest)
+ if err != nil {
+ return "", fmt.Errorf("error parsing config digest %s: %w", img.ConfigDigest, err)
+ }
+ minifestDigestStr := ellipsize(manifestDigest.Encoded(), digestWidth, "")
+ configDigestStr := ellipsize(configDigest.Encoded(), configWidth, "")
imgSize, _ := strconv.ParseUint(img.Size, 10, 64)
size := ellipsize(strings.ReplaceAll(humanize.Bytes(imgSize), " ", ""), sizeWidth, ellipsis)
- config := ellipsize(img.ConfigDigest, configWidth, "")
isSigned := img.IsSigned
row := make([]string, 7) //nolint:gomnd
row[colImageNameIndex] = imageName
row[colTagIndex] = tagName
- row[colDigestIndex] = digest
+ row[colDigestIndex] = minifestDigestStr
row[colSizeIndex] = size
row[colIsSignedIndex] = strconv.FormatBool(isSigned)
if img.verbose {
- row[colConfigIndex] = config
+ row[colConfigIndex] = configDigestStr
row[colLayersIndex] = ""
}
@@ -948,7 +962,13 @@ func (img imageStruct) stringPlainText(maxImgNameLen, maxTagLen int) (string, er
for _, entry := range img.Layers {
layerSize := entry.Size
size := ellipsize(strings.ReplaceAll(humanize.Bytes(layerSize), " ", ""), sizeWidth, ellipsis)
- layerDigest := ellipsize(entry.Digest, digestWidth, "")
+ layerDigest, err := godigest.Parse(entry.Digest)
+ if err != nil {
+ return "", fmt.Errorf("error parsing layer digest %s: %w", entry.Digest, err)
+ }
+ layerDigestStr := ellipsize(layerDigest.Encoded(), digestWidth, "")
layerRow := make([]string, 7) //nolint:gomnd
layerRow[colImageNameIndex] = ""
@@ -956,7 +976,7 @@ func (img imageStruct) stringPlainText(maxImgNameLen, maxTagLen int) (string, er
layerRow[colDigestIndex] = ""
layerRow[colSizeIndex] = size
layerRow[colConfigIndex] = ""
- layerRow[colLayersIndex] = layerDigest
+ layerRow[colLayersIndex] = layerDigestStr
table.Append(layerRow)
}
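The CLI changes above are what produce the new expected test output such as "6e2f80bf": the full digest string is parsed and only a fixed-width prefix of its encoded (hex) part is displayed. A rough sketch of that display logic follows; the shorten helper is illustrative only, while zot itself uses its own ellipsize helper as shown in the hunks above.

package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

// shorten returns at most width characters of the hex-encoded digest,
// mirroring how the image table truncates digests for display.
func shorten(d godigest.Digest, width int) string {
	enc := d.Encoded()
	if len(enc) <= width {
		return enc
	}

	return enc[:width]
}

func main() {
	// Same value the mock service above uses for its Digest field.
	dgst := godigest.FromString("Digest")

	// Prints 6e2f80bf, the short digest the CLI tests now expect.
	fmt.Println(shorten(dgst, 8))
}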
@@ -39,7 +39,7 @@ func (linter *Linter) CheckMandatoryAnnotations(repo string, manifestDigest godi
mandatoryAnnotationsList := linter.config.MandatoryAnnotations
- content, err := imgStore.GetBlobContent(repo, string(manifestDigest))
+ content, err := imgStore.GetBlobContent(repo, manifestDigest)
if err != nil {
linter.log.Error().Err(err).Msg("linter: unable to get image manifest")
@@ -74,16 +74,16 @@ func (linter *Linter) CheckMandatoryAnnotations(repo string, manifestDigest godi
// if there are mandatory annotations missing in the manifest, get config and check these annotations too
configDigest := manifest.Config.Digest
- content, err = imgStore.GetBlobContent(repo, string(configDigest))
+ content, err = imgStore.GetBlobContent(repo, configDigest)
if err != nil {
- linter.log.Error().Err(err).Msg("linter: couldn't get config JSON " + string(configDigest))
+ linter.log.Error().Err(err).Msg("linter: couldn't get config JSON " + configDigest.String())
return false, err
}
var imageConfig ispec.Image
if err := json.Unmarshal(content, &imageConfig); err != nil {
- linter.log.Error().Err(err).Msg("linter: couldn't unmarshal config JSON " + string(configDigest))
+ linter.log.Error().Err(err).Msg("linter: couldn't unmarshal config JSON " + configDigest.String())
return false, err
}
@ -847,7 +847,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
var imageConfig ispec.Image
configDigest := manifest.Config.Digest
buf, err = os.ReadFile(path.Join(dir, "zot-test", "blobs", "sha256",
configDigest.Hex()))
configDigest.Encoded()))
So(err, ShouldBeNil)
err = json.Unmarshal(buf, &imageConfig)
So(err, ShouldBeNil)
@ -863,7 +863,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
So(cfgDigest, ShouldNotBeNil)

err = os.WriteFile(path.Join(dir, "zot-test", "blobs", "sha256",
cfgDigest.Hex()), configContent, 0o600)
cfgDigest.Encoded()), configContent, 0o600)
So(err, ShouldBeNil)

// write manifest
@ -892,7 +892,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
imgStore := local.NewImageStore(dir, false, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)

err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Hex()), 0o000)
err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Encoded()), 0o000)
if err != nil {
panic(err)
}
@ -901,7 +901,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
So(err, ShouldNotBeNil)
So(pass, ShouldBeFalse)

err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Hex()), 0o755)
err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Encoded()), 0o755)
if err != nil {
panic(err)
}
@ -11,7 +11,7 @@ import (
"testing"
"time"

"github.com/opencontainers/go-digest"
godigest "github.com/opencontainers/go-digest"
. "github.com/smartystreets/goconvey/convey"
"gopkg.in/resty.v1"

@ -124,7 +124,7 @@ func TestScrubExtension(t *testing.T) {
if err != nil {
panic(err)
}
var manifestDigest digest.Digest
var manifestDigest godigest.Digest
manifestDigest, _, _ = test.GetOciLayoutDigests("../../../test/data/zot-test")

err = os.Remove(path.Join(dir, repoName, "blobs/sha256", manifestDigest.Encoded()))
@ -276,7 +276,7 @@ func TestRunScrubRepo(t *testing.T) {
if err != nil {
panic(err)
}
var manifestDigest digest.Digest
var manifestDigest godigest.Digest
manifestDigest, _, _ = test.GetOciLayoutDigests("../../../test/data/zot-test")

err = os.Remove(path.Join(dir, repoName, "blobs/sha256", manifestDigest.Encoded()))
@ -6,6 +6,7 @@ import (
"strings"
"time"

godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"

"zotregistry.io/zot/pkg/storage"
@ -24,7 +25,7 @@ const (

type TagInfo struct {
Name string
Digest string
Digest godigest.Digest
Timestamp time.Time
}
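With TagInfo now carrying a typed digest, callers compare against the canonical string form rather than a bare hex string. A self-contained sketch under that assumption (the struct is redeclared locally here purely for illustration; in the codebase it lives in the search common package):

```go
package main

import (
	"fmt"
	"time"

	godigest "github.com/opencontainers/go-digest"
)

// Local stand-in for the TagInfo struct shown in the hunk above.
type TagInfo struct {
	Name      string
	Digest    godigest.Digest
	Timestamp time.Time
}

func main() {
	manifestBlob := []byte(`{"schemaVersion":2}`)

	info := TagInfo{
		Name:      "latest",
		Digest:    godigest.FromBytes(manifestBlob),
		Timestamp: time.Now().UTC(),
	}

	// The typed digest keeps the algorithm prefix, so comparisons use String().
	fmt.Println(info.Digest.String() == godigest.FromBytes(manifestBlob).String()) // true
}
```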
|
||||
|
@ -19,7 +19,7 @@ import (
|
||||
"time"
|
||||
|
||||
dbTypes "github.com/aquasecurity/trivy-db/pkg/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sigstore/cosign/cmd/cosign/cli/generate"
|
||||
@ -491,8 +491,8 @@ func TestRepoListWithNewestImage(t *testing.T) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var manifestDigest digest.Digest
|
||||
var configDigest digest.Digest
|
||||
var manifestDigest godigest.Digest
|
||||
var configDigest godigest.Digest
|
||||
manifestDigest, configDigest, _ = GetOciLayoutDigests("../../../../test/data/zot-test")
|
||||
|
||||
// Delete config blob and try.
|
||||
@ -840,7 +840,7 @@ func TestExpandedRepoInfo(t *testing.T) {
|
||||
So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries[0].Layers), ShouldNotEqual, 0)
|
||||
found := false
|
||||
for _, m := range responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries {
|
||||
if m.Digest == GetTestBlobDigest("zot-cve-test", "manifest").Encoded() {
|
||||
if m.Digest == GetTestBlobDigest("zot-cve-test", "manifest").String() {
|
||||
found = true
|
||||
So(m.IsSigned, ShouldEqual, false)
|
||||
}
|
||||
@ -861,7 +861,7 @@ func TestExpandedRepoInfo(t *testing.T) {
|
||||
So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries[0].Layers), ShouldNotEqual, 0)
|
||||
found = false
|
||||
for _, m := range responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries {
|
||||
if m.Digest == GetTestBlobDigest("zot-cve-test", "manifest").Encoded() {
|
||||
if m.Digest == GetTestBlobDigest("zot-cve-test", "manifest").String() {
|
||||
found = true
|
||||
So(m.IsSigned, ShouldEqual, true)
|
||||
}
|
||||
@ -887,7 +887,7 @@ func TestExpandedRepoInfo(t *testing.T) {
|
||||
So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries[0].Layers), ShouldNotEqual, 0)
|
||||
found = false
|
||||
for _, m := range responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries {
|
||||
if m.Digest == GetTestBlobDigest("zot-test", "manifest").Encoded() {
|
||||
if m.Digest == GetTestBlobDigest("zot-test", "manifest").String() {
|
||||
found = true
|
||||
So(m.IsSigned, ShouldEqual, false)
|
||||
}
|
||||
@ -908,14 +908,14 @@ func TestExpandedRepoInfo(t *testing.T) {
|
||||
So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries[0].Layers), ShouldNotEqual, 0)
|
||||
found = false
|
||||
for _, m := range responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries {
|
||||
if m.Digest == GetTestBlobDigest("zot-test", "manifest").Encoded() {
|
||||
if m.Digest == GetTestBlobDigest("zot-test", "manifest").String() {
|
||||
found = true
|
||||
So(m.IsSigned, ShouldEqual, true)
|
||||
}
|
||||
}
|
||||
So(found, ShouldEqual, true)
|
||||
|
||||
var manifestDigest digest.Digest
|
||||
var manifestDigest godigest.Digest
|
||||
manifestDigest, _, _ = GetOciLayoutDigests("../../../../test/data/zot-test")
|
||||
|
||||
err = os.Remove(path.Join(rootDir, "zot-test/blobs/sha256", manifestDigest.Encoded()))
|
||||
@ -1105,7 +1105,7 @@ func TestDerivedImageList(t *testing.T) {
|
||||
OS: "linux",
|
||||
RootFS: ispec.RootFS{
|
||||
Type: "layers",
|
||||
DiffIDs: []digest.Digest{},
|
||||
DiffIDs: []godigest.Digest{},
|
||||
},
|
||||
Author: "ZotUser",
|
||||
}
|
||||
@ -1113,7 +1113,7 @@ func TestDerivedImageList(t *testing.T) {
|
||||
configBlob, err := json.Marshal(config)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
configDigest := digest.FromBytes(configBlob)
|
||||
configDigest := godigest.FromBytes(configBlob)
|
||||
|
||||
layers := [][]byte{
|
||||
{10, 11, 10, 11},
|
||||
@ -1133,17 +1133,17 @@ func TestDerivedImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[2]),
|
||||
Digest: godigest.FromBytes(layers[2]),
|
||||
Size: int64(len(layers[2])),
|
||||
},
|
||||
},
|
||||
@ -1176,17 +1176,17 @@ func TestDerivedImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[2]),
|
||||
Digest: godigest.FromBytes(layers[2]),
|
||||
Size: int64(len(layers[2])),
|
||||
},
|
||||
},
|
||||
@ -1224,12 +1224,12 @@ func TestDerivedImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
},
|
||||
@ -1270,27 +1270,27 @@ func TestDerivedImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[2]),
|
||||
Digest: godigest.FromBytes(layers[2]),
|
||||
Size: int64(len(layers[2])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[3]),
|
||||
Digest: godigest.FromBytes(layers[3]),
|
||||
Size: int64(len(layers[3])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[4]),
|
||||
Digest: godigest.FromBytes(layers[4]),
|
||||
Size: int64(len(layers[4])),
|
||||
},
|
||||
},
|
||||
@ -1450,7 +1450,7 @@ func TestGetImageManifest(t *testing.T) {
|
||||
|
||||
Convey("Test nonexistent image", t, func() {
|
||||
mockImageStore := mocks.MockedImageStore{
|
||||
GetImageManifestFn: func(repo string, reference string) ([]byte, string, string, error) {
|
||||
GetImageManifestFn: func(repo string, reference string) ([]byte, godigest.Digest, string, error) {
|
||||
return []byte{}, "", "", ErrTestError
|
||||
},
|
||||
}
|
||||
@ -1520,7 +1520,7 @@ func TestBaseImageList(t *testing.T) {
|
||||
OS: "linux",
|
||||
RootFS: ispec.RootFS{
|
||||
Type: "layers",
|
||||
DiffIDs: []digest.Digest{},
|
||||
DiffIDs: []godigest.Digest{},
|
||||
},
|
||||
Author: "ZotUser",
|
||||
}
|
||||
@ -1528,7 +1528,7 @@ func TestBaseImageList(t *testing.T) {
|
||||
configBlob, err := json.Marshal(config)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
configDigest := digest.FromBytes(configBlob)
|
||||
configDigest := godigest.FromBytes(configBlob)
|
||||
|
||||
layers := [][]byte{
|
||||
{10, 11, 10, 11},
|
||||
@ -1549,22 +1549,22 @@ func TestBaseImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[2]),
|
||||
Digest: godigest.FromBytes(layers[2]),
|
||||
Size: int64(len(layers[2])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[3]),
|
||||
Digest: godigest.FromBytes(layers[3]),
|
||||
Size: int64(len(layers[3])),
|
||||
},
|
||||
},
|
||||
@ -1597,22 +1597,22 @@ func TestBaseImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[2]),
|
||||
Digest: godigest.FromBytes(layers[2]),
|
||||
Size: int64(len(layers[2])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[3]),
|
||||
Digest: godigest.FromBytes(layers[3]),
|
||||
Size: int64(len(layers[3])),
|
||||
},
|
||||
},
|
||||
@ -1650,12 +1650,12 @@ func TestBaseImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
},
|
||||
@ -1693,12 +1693,12 @@ func TestBaseImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
},
|
||||
@ -1739,27 +1739,27 @@ func TestBaseImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[2]),
|
||||
Digest: godigest.FromBytes(layers[2]),
|
||||
Size: int64(len(layers[2])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[3]),
|
||||
Digest: godigest.FromBytes(layers[3]),
|
||||
Size: int64(len(layers[3])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[4]),
|
||||
Digest: godigest.FromBytes(layers[4]),
|
||||
Size: int64(len(layers[4])),
|
||||
},
|
||||
},
|
||||
@ -1797,12 +1797,12 @@ func TestBaseImageList(t *testing.T) {
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[0]),
|
||||
Digest: godigest.FromBytes(layers[0]),
|
||||
Size: int64(len(layers[0])),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.layer.v1.tar",
|
||||
Digest: digest.FromBytes(layers[1]),
|
||||
Digest: godigest.FromBytes(layers[1]),
|
||||
Size: int64(len(layers[1])),
|
||||
},
|
||||
},
|
||||
@ -2444,7 +2444,7 @@ func TestImageList(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
var imageConfigInfo ispec.Image
|
||||
imageConfigBuf, err := imageStore.GetBlobContent(repos[0], imageManifest.Config.Digest.String())
|
||||
imageConfigBuf, err := imageStore.GetBlobContent(repos[0], imageManifest.Config.Digest)
|
||||
So(err, ShouldBeNil)
|
||||
err = json.Unmarshal(imageConfigBuf, &imageConfigInfo)
|
||||
So(err, ShouldBeNil)
|
||||
@ -2506,7 +2506,7 @@ func TestImageList(t *testing.T) {
|
||||
OS: "linux",
|
||||
RootFS: ispec.RootFS{
|
||||
Type: "layers",
|
||||
DiffIDs: []digest.Digest{},
|
||||
DiffIDs: []godigest.Digest{},
|
||||
},
|
||||
Author: "ZotUser",
|
||||
History: []ispec.History{},
|
||||
@ -2515,8 +2515,8 @@ func TestImageList(t *testing.T) {
|
||||
configBlob, err := json.Marshal(config)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
configDigest := digest.FromBytes(configBlob)
|
||||
layerDigest := digest.FromString(invalid)
|
||||
configDigest := godigest.FromBytes(configBlob)
|
||||
layerDigest := godigest.FromString(invalid)
|
||||
layerblob := []byte(invalid)
|
||||
schemaVersion := 2
|
||||
ispecManifest := ispec.Manifest{
|
||||
@ -2623,7 +2623,7 @@ func TestBuildImageInfo(t *testing.T) {
|
||||
OS: "linux",
|
||||
RootFS: ispec.RootFS{
|
||||
Type: "layers",
|
||||
DiffIDs: []digest.Digest{},
|
||||
DiffIDs: []godigest.Digest{},
|
||||
},
|
||||
Author: "ZotUser",
|
||||
History: []ispec.History{ // should contain 3 elements, 2 of which corresponding to layers
|
||||
@ -2642,8 +2642,8 @@ func TestBuildImageInfo(t *testing.T) {
|
||||
configBlob, err := json.Marshal(config)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
configDigest := digest.FromBytes(configBlob)
|
||||
layerDigest := digest.FromString(invalid)
|
||||
configDigest := godigest.FromBytes(configBlob)
|
||||
layerDigest := godigest.FromString(invalid)
|
||||
layerblob := []byte(invalid)
|
||||
schemaVersion := 2
|
||||
ispecManifest := ispec.Manifest{
|
||||
@ -2666,7 +2666,7 @@ func TestBuildImageInfo(t *testing.T) {
|
||||
manifestLayersSize := ispecManifest.Layers[0].Size
|
||||
manifestBlob, err := json.Marshal(ispecManifest)
|
||||
So(err, ShouldBeNil)
|
||||
manifestDigest := digest.FromBytes(manifestBlob)
|
||||
manifestDigest := godigest.FromBytes(manifestBlob)
|
||||
err = UploadImage(
|
||||
Image{
|
||||
Manifest: ispecManifest,
|
||||
@ -2704,7 +2704,7 @@ func TestBaseOciLayoutUtils(t *testing.T) {
|
||||
|
||||
Convey("GetImageManifestSize fail", t, func() {
|
||||
mockStoreController := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(repo, digest string) ([]byte, error) {
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
return []byte{}, ErrTestError
|
||||
},
|
||||
}
|
||||
@ -2718,7 +2718,7 @@ func TestBaseOciLayoutUtils(t *testing.T) {
|
||||
|
||||
Convey("GetImageConfigSize: fail GetImageBlobManifest", t, func() {
|
||||
mockStoreController := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(repo, digest string) ([]byte, error) {
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
return []byte{}, ErrTestError
|
||||
},
|
||||
}
|
||||
@ -2732,8 +2732,8 @@ func TestBaseOciLayoutUtils(t *testing.T) {
|
||||
|
||||
Convey("GetImageConfigSize: config GetBlobContent fail", t, func() {
|
||||
mockStoreController := mocks.MockedImageStore{
|
||||
GetBlobContentFn: func(_, digest string) ([]byte, error) {
|
||||
if digest == manifestDigest {
|
||||
GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
if digest.String() == manifestDigest {
|
||||
return []byte{}, ErrTestError
|
||||
}
|
||||
|
||||
@ -2929,7 +2929,7 @@ func TestImageSummary(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
configBlob, errConfig := json.Marshal(config)
|
||||
configDigest := digest.FromBytes(configBlob)
|
||||
configDigest := godigest.FromBytes(configBlob)
|
||||
So(errConfig, ShouldBeNil) // marshall success, config is valid JSON
|
||||
go startServer(ctlr)
|
||||
defer stopServer(ctlr)
|
||||
@ -2938,7 +2938,7 @@ func TestImageSummary(t *testing.T) {
|
||||
manifestBlob, errMarsal := json.Marshal(manifest)
|
||||
So(errMarsal, ShouldBeNil)
|
||||
So(manifestBlob, ShouldNotBeNil)
|
||||
manifestDigest := digest.FromBytes(manifestBlob)
|
||||
manifestDigest := godigest.FromBytes(manifestBlob)
|
||||
repoName := "test-repo" //nolint:goconst
|
||||
|
||||
tagTarget := "latest"
|
||||
@ -2979,11 +2979,11 @@ func TestImageSummary(t *testing.T) {
|
||||
|
||||
imgSummary := imgSummaryResponse.SingleImageSummary.ImageSummary
|
||||
So(imgSummary.RepoName, ShouldContainSubstring, repoName)
|
||||
So(imgSummary.ConfigDigest, ShouldContainSubstring, configDigest.Hex())
|
||||
So(imgSummary.Digest, ShouldContainSubstring, manifestDigest.Hex())
|
||||
So(imgSummary.ConfigDigest, ShouldContainSubstring, configDigest.Encoded())
|
||||
So(imgSummary.Digest, ShouldContainSubstring, manifestDigest.Encoded())
|
||||
So(len(imgSummary.Layers), ShouldEqual, 1)
|
||||
So(imgSummary.Layers[0].Digest, ShouldContainSubstring,
|
||||
digest.FromBytes(layers[0]).Hex())
|
||||
godigest.FromBytes(layers[0]).Encoded())
|
||||
|
||||
t.Log("starting Test retrieve duplicated image same layers based on image identifier")
|
||||
// gqlEndpoint
|
||||
|
@ -8,7 +8,6 @@ import (
"path"
"strconv"

v1 "github.com/google/go-containerregistry/pkg/v1"
notreg "github.com/notaryproject/notation-go/registry"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -19,10 +18,10 @@ import (
)

type OciLayoutUtils interface { //nolint: interfacebloat
GetImageManifest(repo string, reference string) (ispec.Manifest, string, error)
GetImageManifests(image string) ([]ispec.Descriptor, error)
GetImageBlobManifest(imageDir string, digest godigest.Digest) (v1.Manifest, error)
GetImageInfo(imageDir string, hash v1.Hash) (ispec.Image, error)
GetImageManifest(repo string, reference string) (ispec.Manifest, godigest.Digest, error)
GetImageManifests(repo string) ([]ispec.Descriptor, error)
GetImageBlobManifest(repo string, digest godigest.Digest) (ispec.Manifest, error)
GetImageInfo(repo string, digest godigest.Digest) (ispec.Image, error)
GetImageTagsWithTimestamp(repo string) ([]TagInfo, error)
GetImagePlatform(imageInfo ispec.Image) (string, string)
GetImageManifestSize(repo string, manifestDigest godigest.Digest) int64
@ -44,26 +43,26 @@ func NewBaseOciLayoutUtils(storeController storage.StoreController, log log.Logg
return &BaseOciLayoutUtils{Log: log, StoreController: storeController}
}

func (olu BaseOciLayoutUtils) GetImageManifest(repo string, reference string) (ispec.Manifest, string, error) {
func (olu BaseOciLayoutUtils) GetImageManifest(repo string, reference string) (ispec.Manifest, godigest.Digest, error) {
imageStore := olu.StoreController.GetImageStore(repo)

if reference == "" {
reference = "latest"
}

buf, dig, _, err := imageStore.GetImageManifest(repo, reference)
manifestBlob, digest, _, err := imageStore.GetImageManifest(repo, reference)
if err != nil {
return ispec.Manifest{}, "", err
}

var manifest ispec.Manifest

err = json.Unmarshal(buf, &manifest)
err = json.Unmarshal(manifestBlob, &manifest)
if err != nil {
return ispec.Manifest{}, "", err
}

return manifest, dig, nil
return manifest, digest, nil
}
// Provide a list of repositories from all the available image stores.
|
||||
@ -89,10 +88,10 @@ func (olu BaseOciLayoutUtils) GetRepositories() ([]string, error) {
|
||||
}
|
||||
|
||||
// Below method will return image path including root dir, root dir is determined by splitting.
|
||||
func (olu BaseOciLayoutUtils) GetImageManifests(image string) ([]ispec.Descriptor, error) {
|
||||
imageStore := olu.StoreController.GetImageStore(image)
|
||||
func (olu BaseOciLayoutUtils) GetImageManifests(repo string) ([]ispec.Descriptor, error) {
|
||||
imageStore := olu.StoreController.GetImageStore(repo)
|
||||
|
||||
buf, err := imageStore.GetIndexContent(image)
|
||||
buf, err := imageStore.GetIndexContent(repo)
|
||||
if err != nil {
|
||||
if goerrors.Is(errors.ErrRepoNotFound, err) {
|
||||
olu.Log.Error().Err(err).Msg("index.json doesn't exist")
|
||||
@ -108,7 +107,7 @@ func (olu BaseOciLayoutUtils) GetImageManifests(image string) ([]ispec.Descripto
|
||||
var index ispec.Index
|
||||
|
||||
if err := json.Unmarshal(buf, &index); err != nil {
|
||||
olu.Log.Error().Err(err).Str("dir", path.Join(imageStore.RootDir(), image)).Msg("invalid JSON")
|
||||
olu.Log.Error().Err(err).Str("dir", path.Join(imageStore.RootDir(), repo)).Msg("invalid JSON")
|
||||
|
||||
return nil, errors.ErrRepoNotFound
|
||||
}
|
||||
@ -116,12 +115,12 @@ func (olu BaseOciLayoutUtils) GetImageManifests(image string) ([]ispec.Descripto
|
||||
return index.Manifests, nil
|
||||
}
|
||||
|
||||
func (olu BaseOciLayoutUtils) GetImageBlobManifest(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
var blobIndex v1.Manifest
|
||||
func (olu BaseOciLayoutUtils) GetImageBlobManifest(repo string, digest godigest.Digest) (ispec.Manifest, error) {
|
||||
var blobIndex ispec.Manifest
|
||||
|
||||
imageStore := olu.StoreController.GetImageStore(imageDir)
|
||||
imageStore := olu.StoreController.GetImageStore(repo)
|
||||
|
||||
blobBuf, err := imageStore.GetBlobContent(imageDir, digest.String())
|
||||
blobBuf, err := imageStore.GetBlobContent(repo, digest)
|
||||
if err != nil {
|
||||
olu.Log.Error().Err(err).Msg("unable to open image metadata file")
|
||||
|
||||
@ -137,12 +136,12 @@ func (olu BaseOciLayoutUtils) GetImageBlobManifest(imageDir string, digest godig
|
||||
return blobIndex, nil
|
||||
}
|
||||
|
||||
func (olu BaseOciLayoutUtils) GetImageInfo(imageDir string, hash v1.Hash) (ispec.Image, error) {
|
||||
func (olu BaseOciLayoutUtils) GetImageInfo(repo string, digest godigest.Digest) (ispec.Image, error) {
|
||||
var imageInfo ispec.Image
|
||||
|
||||
imageStore := olu.StoreController.GetImageStore(imageDir)
|
||||
imageStore := olu.StoreController.GetImageStore(repo)
|
||||
|
||||
blobBuf, err := imageStore.GetBlobContent(imageDir, hash.String())
|
||||
blobBuf, err := imageStore.GetBlobContent(repo, digest)
|
||||
if err != nil {
|
||||
olu.Log.Error().Err(err).Msg("unable to open image layers file")
|
||||
|
||||
@ -190,7 +189,7 @@ func (olu BaseOciLayoutUtils) GetImageTagsWithTimestamp(repo string) ([]TagInfo,
|
||||
|
||||
timeStamp := GetImageLastUpdated(imageInfo)
|
||||
|
||||
tagsInfo = append(tagsInfo, TagInfo{Name: val, Timestamp: timeStamp, Digest: digest.String()})
|
||||
tagsInfo = append(tagsInfo, TagInfo{Name: val, Timestamp: timeStamp, Digest: digest})
|
||||
}
|
||||
}
|
||||
|
||||
@ -202,7 +201,7 @@ func (olu BaseOciLayoutUtils) checkNotarySignature(name string, digest godigest.
|
||||
imageStore := olu.StoreController.GetImageStore(name)
|
||||
mediaType := notreg.ArtifactTypeNotation
|
||||
|
||||
_, err := imageStore.GetReferrers(name, digest.String(), mediaType)
|
||||
_, err := imageStore.GetReferrers(name, digest, mediaType)
|
||||
if err != nil {
|
||||
olu.Log.Info().Err(err).Str("repo", name).Str("digest",
|
||||
digest.String()).Str("mediatype", mediaType).Msg("invalid notary signature")
|
||||
@ -266,7 +265,7 @@ func (olu BaseOciLayoutUtils) GetImageConfigInfo(repo string, manifestDigest god
|
||||
func (olu BaseOciLayoutUtils) GetImageManifestSize(repo string, manifestDigest godigest.Digest) int64 {
|
||||
imageStore := olu.StoreController.GetImageStore(repo)
|
||||
|
||||
manifestBlob, err := imageStore.GetBlobContent(repo, manifestDigest.String())
|
||||
manifestBlob, err := imageStore.GetBlobContent(repo, manifestDigest)
|
||||
if err != nil {
|
||||
olu.Log.Error().Err(err).Msg("error when getting manifest blob content")
|
||||
|
||||
@ -332,7 +331,8 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error)
|
||||
|
||||
tag, ok := man.Annotations[ispec.AnnotationRefName]
|
||||
if !ok {
|
||||
olu.Log.Info().Msgf("skipping manifest with digest %s because it doesn't have a tag", string(man.Digest))
|
||||
olu.Log.Info().Msgf("skipping manifest with digest %s because it doesn't have a tag",
|
||||
man.Digest.String())
|
||||
|
||||
continue
|
||||
}
|
||||
@ -347,11 +347,11 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error)
|
||||
isSigned := olu.CheckManifestSignature(name, man.Digest)
|
||||
|
||||
manifestSize := olu.GetImageManifestSize(name, man.Digest)
|
||||
olu.Log.Debug().Msg(fmt.Sprintf("%v", man.Digest))
|
||||
olu.Log.Debug().Msg(fmt.Sprintf("%v", man.Digest.String()))
|
||||
configSize := manifest.Config.Size
|
||||
|
||||
repoBlob2Size[man.Digest.String()] = manifestSize
|
||||
repoBlob2Size[manifest.Config.Digest.Hex] = configSize
|
||||
repoBlob2Size[manifest.Config.Digest.String()] = configSize
|
||||
|
||||
imageConfigInfo, err := olu.GetImageConfigInfo(name, man.Digest)
|
||||
if err != nil {
|
||||
@ -373,7 +373,7 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error)
|
||||
for _, layer := range manifest.Layers {
|
||||
layerInfo := LayerSummary{}
|
||||
|
||||
layerInfo.Digest = layer.Digest.Hex
|
||||
layerInfo.Digest = layer.Digest.String()
|
||||
|
||||
repoBlob2Size[layerInfo.Digest] = layer.Size
|
||||
|
||||
@ -392,8 +392,8 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error)
|
||||
repoVendors = append(repoVendors, annotations.Vendor)
|
||||
|
||||
size := strconv.Itoa(int(imageSize))
|
||||
manifestDigest := man.Digest.Hex()
|
||||
configDigest := manifest.Config.Digest.Hex
|
||||
manifestDigest := man.Digest.String()
|
||||
configDigest := manifest.Config.Digest.String()
|
||||
lastUpdated := GetImageLastUpdated(imageConfigInfo)
|
||||
score := 0
|
||||
|
||||
@ -419,7 +419,7 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error)
|
||||
|
||||
imageSummaries = append(imageSummaries, imageSummary)
|
||||
|
||||
if man.Digest.String() == lastUpdatedTag.Digest {
|
||||
if man.Digest.String() == lastUpdatedTag.Digest.String() {
|
||||
lastUpdatedImageSummary = imageSummary
|
||||
}
|
||||
}
|
||||
|
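The size-accounting hunks above switch blob map keys from Digest.Hex to Digest.String(). A minimal sketch (not part of this commit, with made-up blob contents) of why keys built with String() dedupe consistently across manifest, config, and layer digests, using the same repoBlob2Size pattern:

```go
package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

func main() {
	configBlob := []byte(`{"architecture":"amd64","os":"linux"}`)
	layerBlob := []byte("layer-bytes")

	// Keys follow the canonical "algorithm:hex" form used throughout the refactor,
	// so the same blob referenced twice lands on the same key.
	repoBlob2Size := map[string]int64{}
	repoBlob2Size[godigest.FromBytes(configBlob).String()] = int64(len(configBlob))
	repoBlob2Size[godigest.FromBytes(layerBlob).String()] = int64(len(layerBlob))
	repoBlob2Size[godigest.FromBytes(layerBlob).String()] = int64(len(layerBlob)) // no duplicate entry

	for dgst, size := range repoBlob2Size {
		fmt.Println(dgst, size)
	}
}
```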
@ -3,8 +3,7 @@ package cveinfo
import (
"fmt"

v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/opencontainers/go-digest"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"

"zotregistry.io/zot/pkg/extensions/search/common"
@ -31,8 +30,8 @@ type Scanner interface {

type ImageInfoByCVE struct {
Tag string
Digest digest.Digest
Manifest v1.Manifest
Digest godigest.Digest
Manifest ispec.Manifest
}

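With ImageInfoByCVE holding an ispec.Manifest and a typed digest instead of the go-containerregistry types, populating it only needs the OCI image-spec and go-digest packages. A hedged, self-contained sketch (the struct is redeclared locally for illustration; field values are invented):

```go
package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// Local stand-in for the ImageInfoByCVE struct from the hunk above.
type ImageInfoByCVE struct {
	Tag      string
	Digest   godigest.Digest
	Manifest ispec.Manifest
}

func main() {
	manifestBlob := []byte(`{"schemaVersion":2}`)

	info := ImageInfoByCVE{
		Tag:    "1.0.0",
		Digest: godigest.FromBytes(manifestBlob),
		Manifest: ispec.Manifest{
			Config: ispec.Descriptor{
				MediaType: ispec.MediaTypeImageConfig,
				Digest:    godigest.FromString("config"),
			},
			Layers: []ispec.Descriptor{
				{
					MediaType: ispec.MediaTypeImageLayerGzip,
					Digest:    godigest.FromString("layer"),
				},
			},
		},
	}

	fmt.Println(info.Tag, info.Digest.Encoded()[:12])
}
```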
type ImageCVESummary struct {
|
||||
|
@ -15,9 +15,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
regTypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
"gopkg.in/resty.v1"
|
||||
@ -394,6 +393,8 @@ func TestDownloadDB(t *testing.T) {
|
||||
func TestCVESearch(t *testing.T) {
|
||||
Convey("Test image vulnerability scanning", t, func() {
|
||||
updateDuration, _ = time.ParseDuration("1h")
|
||||
dbDir := "../../../../test/data"
|
||||
|
||||
port := GetFreePort()
|
||||
baseURL := GetBaseURL(port)
|
||||
conf := config.New()
|
||||
@ -785,7 +786,7 @@ func TestCVEStruct(t *testing.T) {
|
||||
Annotations: map[string]string{
|
||||
ispec.AnnotationRefName: "0.1.0",
|
||||
},
|
||||
Digest: "abcc",
|
||||
Digest: godigest.FromString("abcc"),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
@ -793,7 +794,7 @@ func TestCVEStruct(t *testing.T) {
|
||||
Annotations: map[string]string{
|
||||
ispec.AnnotationRefName: "1.0.0",
|
||||
},
|
||||
Digest: "abcd",
|
||||
Digest: godigest.FromString("abcd"),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
@ -801,7 +802,7 @@ func TestCVEStruct(t *testing.T) {
|
||||
Annotations: map[string]string{
|
||||
ispec.AnnotationRefName: "1.1.0",
|
||||
},
|
||||
Digest: "abce",
|
||||
Digest: godigest.FromString("abce"),
|
||||
},
|
||||
{
|
||||
MediaType: "application/vnd.oci.image.manifest.v1+json",
|
||||
@ -809,7 +810,7 @@ func TestCVEStruct(t *testing.T) {
|
||||
Annotations: map[string]string{
|
||||
ispec.AnnotationRefName: "1.0.1",
|
||||
},
|
||||
Digest: "abcf",
|
||||
Digest: godigest.FromString("abcf"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@ -823,7 +824,7 @@ func TestCVEStruct(t *testing.T) {
|
||||
Annotations: map[string]string{
|
||||
ispec.AnnotationRefName: "1.0.0",
|
||||
},
|
||||
Digest: "abcd",
|
||||
Digest: godigest.FromString("abcd"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@ -837,22 +838,22 @@ func TestCVEStruct(t *testing.T) {
|
||||
return []common.TagInfo{
|
||||
{
|
||||
Name: "0.1.0",
|
||||
Digest: "abcc",
|
||||
Digest: godigest.FromString("abcc"),
|
||||
Timestamp: time.Date(2008, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
Name: "1.0.0",
|
||||
Digest: "abcd",
|
||||
Digest: godigest.FromString("abcd"),
|
||||
Timestamp: time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
Name: "1.1.0",
|
||||
Digest: "abce",
|
||||
Digest: godigest.FromString("abce"),
|
||||
Timestamp: time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
Name: "1.0.1",
|
||||
Digest: "abcf",
|
||||
Digest: godigest.FromString("abcf"),
|
||||
Timestamp: time.Date(2011, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
}, nil
|
||||
@ -863,7 +864,7 @@ func TestCVEStruct(t *testing.T) {
|
||||
return []common.TagInfo{
|
||||
{
|
||||
Name: "1.0.0",
|
||||
Digest: "abcd",
|
||||
Digest: godigest.FromString("abcd"),
|
||||
Timestamp: time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
}, nil
|
||||
@ -872,15 +873,15 @@ func TestCVEStruct(t *testing.T) {
|
||||
// By default do not return any tags
|
||||
return []common.TagInfo{}, errors.ErrRepoNotFound
|
||||
},
|
||||
GetImageBlobManifestFn: func(imageDir string, digest digest.Digest) (v1.Manifest, error) {
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (ispec.Manifest, error) {
|
||||
// Valid image for scanning
|
||||
if imageDir == "repo1" { //nolint: goconst
|
||||
return v1.Manifest{
|
||||
Layers: []v1.Descriptor{
|
||||
return ispec.Manifest{
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: regTypes.OCILayer,
|
||||
MediaType: ispec.MediaTypeImageLayer,
|
||||
Size: 0,
|
||||
Digest: v1.Hash{},
|
||||
Digest: godigest.Digest(""),
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
@ -888,18 +889,18 @@ func TestCVEStruct(t *testing.T) {
|
||||
|
||||
// Image with non-scannable blob
|
||||
if imageDir == "repo2" { //nolint: goconst
|
||||
return v1.Manifest{
|
||||
Layers: []v1.Descriptor{
|
||||
return ispec.Manifest{
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
MediaType: regTypes.OCIRestrictedLayer,
|
||||
MediaType: string(regTypes.OCIRestrictedLayer),
|
||||
Size: 0,
|
||||
Digest: v1.Hash{},
|
||||
Digest: godigest.Digest(""),
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
return v1.Manifest{}, errors.ErrBlobNotFound
|
||||
return ispec.Manifest{}, errors.ErrBlobNotFound
|
||||
},
|
||||
}
|
||||
|
||||
@ -1010,7 +1011,7 @@ func TestCVEStruct(t *testing.T) {
|
||||
|
||||
for _, imageLayer := range imageLayers {
|
||||
switch imageLayer.MediaType {
|
||||
case regTypes.OCILayer, regTypes.DockerLayer:
|
||||
case ispec.MediaTypeImageLayer, ispec.MediaTypeImageLayerGzip, string(regTypes.DockerLayer):
|
||||
return true, nil
|
||||
|
||||
default:
|
||||
@ -1211,9 +1212,9 @@ func TestCVEStruct(t *testing.T) {
|
||||
Convey("Test error while reading blob manifest", func() {
|
||||
localLayoutUtils := layoutUtils
|
||||
localLayoutUtils.GetImageBlobManifestFn = func(imageDir string,
|
||||
digest digest.Digest,
|
||||
) (v1.Manifest, error) {
|
||||
return v1.Manifest{}, errors.ErrBlobNotFound
|
||||
digest godigest.Digest,
|
||||
) (ispec.Manifest, error) {
|
||||
return ispec.Manifest{}, errors.ErrBlobNotFound
|
||||
}
|
||||
|
||||
cveInfo := cveinfo.BaseCveInfo{Log: log, Scanner: scanner, LayoutUtils: localLayoutUtils}
|
||||
|
@ -167,7 +167,7 @@ func (scanner Scanner) IsImageFormatScannable(image string) (bool, error) {
|
||||
|
||||
for _, imageLayer := range imageLayers {
|
||||
switch imageLayer.MediaType {
|
||||
case regTypes.OCILayer, regTypes.DockerLayer:
|
||||
case ispec.MediaTypeImageLayer, ispec.MediaTypeImageLayerGzip, string(regTypes.DockerLayer):
|
||||
return true, nil
|
||||
|
||||
default:
|
||||
|
@ -34,7 +34,7 @@ func generateTestImage(storeController storage.StoreController, image string) {
|
||||
for _, layerBlob := range layers {
|
||||
layerReader := bytes.NewReader(layerBlob)
|
||||
layerDigest := godigest.FromBytes(layerBlob)
|
||||
_, _, err = store.FullBlobUpload(repoName, layerReader, layerDigest.String())
|
||||
_, _, err = store.FullBlobUpload(repoName, layerReader, layerDigest)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
|
||||
@ -42,7 +42,7 @@ func generateTestImage(storeController storage.StoreController, image string) {
|
||||
So(err, ShouldBeNil)
|
||||
configReader := bytes.NewReader(configBlob)
|
||||
configDigest := godigest.FromBytes(configBlob)
|
||||
_, _, err = store.FullBlobUpload(repoName, configReader, configDigest.String())
|
||||
_, _, err = store.FullBlobUpload(repoName, configReader, configDigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestBlob, err := json.Marshal(manifest)
|
||||
|
@ -3,8 +3,7 @@ package digestinfo
|
||||
import (
|
||||
"strings"
|
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/opencontainers/go-digest"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"zotregistry.io/zot/pkg/extensions/search/common"
|
||||
@ -20,8 +19,8 @@ type DigestInfo struct {
|
||||
|
||||
type ImageInfoByDigest struct {
|
||||
Tag string
|
||||
Digest digest.Digest
|
||||
Manifest v1.Manifest
|
||||
Digest godigest.Digest
|
||||
Manifest ispec.Manifest
|
||||
}
|
||||
|
||||
// NewDigestInfo initializes a new DigestInfo object.
|
||||
@ -64,14 +63,14 @@ func (digestinfo DigestInfo) GetImageTagsByDigest(repo, digest string) ([]ImageI
|
||||
|
||||
// Check the image config matches the search digest
|
||||
// This is a blob with mediaType application/vnd.oci.image.config.v1+json
|
||||
if strings.Contains(imageBlobManifest.Config.Digest.Algorithm+":"+imageBlobManifest.Config.Digest.Hex, digest) {
|
||||
if strings.Contains(imageBlobManifest.Config.Digest.String(), digest) {
|
||||
tags = append(tags, &val)
|
||||
}
|
||||
|
||||
// Check to see if the individual layers in the oci image manifest match the digest
|
||||
// These are blobs with mediaType application/vnd.oci.image.layer.v1.tar+gzip
|
||||
for _, layer := range imageBlobManifest.Layers {
|
||||
if strings.Contains(layer.Digest.Algorithm+":"+layer.Digest.Hex, digest) {
|
||||
if strings.Contains(layer.Digest.String(), digest) {
|
||||
tags = append(tags, &val)
|
||||
}
|
||||
}
|
||||
|
@ -13,7 +13,6 @@ import (
|
||||
|
||||
"github.com/99designs/gqlgen/graphql"
|
||||
glob "github.com/bmatcuk/doublestar/v4"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/vektah/gqlparser/v2/gqlerror"
|
||||
@ -141,7 +140,7 @@ func repoListWithNewestImage(
|
||||
|
||||
configSize := imageBlobManifest.Config.Size
|
||||
repoBlob2Size[manifest.Digest.String()] = manifestSize
|
||||
repoBlob2Size[imageBlobManifest.Config.Digest.Hex] = configSize
|
||||
repoBlob2Size[imageBlobManifest.Config.Digest.String()] = configSize
|
||||
|
||||
for _, layer := range imageBlobManifest.Layers {
|
||||
repoBlob2Size[layer.Digest.String()] = layer.Size
|
||||
@ -208,8 +207,8 @@ func repoListWithNewestImage(
|
||||
|
||||
tag := manifestTag
|
||||
size := strconv.Itoa(int(imageSize))
|
||||
manifestDigest := manifest.Digest.Hex()
|
||||
configDigest := imageBlobManifest.Config.Digest.Hex
|
||||
manifestDigest := manifest.Digest.String()
|
||||
configDigest := imageBlobManifest.Config.Digest.String()
|
||||
isSigned := olu.CheckManifestSignature(repo, manifest.Digest)
|
||||
lastUpdated := common.GetImageLastUpdated(imageConfigInfo)
|
||||
score := 0
|
||||
@ -237,7 +236,7 @@ func repoListWithNewestImage(
|
||||
},
|
||||
}
|
||||
|
||||
if manifest.Digest.String() == lastUpdatedTag.Digest {
|
||||
if manifest.Digest.String() == lastUpdatedTag.Digest.String() {
|
||||
lastUpdatedImageSummary = imageSummary
|
||||
}
|
||||
}
|
||||
@ -329,7 +328,7 @@ func globalSearch(repoList []string, name, tag string, olu common.OciLayoutUtils
|
||||
configSize := imageBlobManifest.Config.Size
|
||||
|
||||
repoBlob2Size[manifest.Digest.String()] = manifestSize
|
||||
repoBlob2Size[imageBlobManifest.Config.Digest.Hex] = configSize
|
||||
repoBlob2Size[imageBlobManifest.Config.Digest.String()] = configSize
|
||||
|
||||
for _, layer := range imageBlobManifest.Layers {
|
||||
layer := layer
|
||||
@ -381,8 +380,8 @@ func globalSearch(repoList []string, name, tag string, olu common.OciLayoutUtils
|
||||
// get image info from manifest annotation, if not found get from image config labels.
|
||||
annotations := common.GetAnnotations(imageBlobManifest.Annotations, imageConfigInfo.Config.Labels)
|
||||
|
||||
manifestDigest := manifest.Digest.Hex()
|
||||
configDigest := imageBlobManifest.Config.Digest.Hex
|
||||
manifestDigest := manifest.Digest.String()
|
||||
configDigest := imageBlobManifest.Config.Digest.String()
|
||||
|
||||
repoPlatforms = append(repoPlatforms, osArch)
|
||||
repoVendors = append(repoVendors, &annotations.Vendor)
|
||||
@ -426,7 +425,7 @@ func globalSearch(repoList []string, name, tag string, olu common.OciLayoutUtils
|
||||
},
|
||||
}
|
||||
|
||||
if manifest.Digest.String() == lastUpdatedTag.Digest {
|
||||
if manifest.Digest.String() == lastUpdatedTag.Digest.String() {
|
||||
lastUpdatedImageSummary = imageSummary
|
||||
}
|
||||
|
||||
@ -529,8 +528,7 @@ func (r *queryResolver) getImageList(store storage.ImageStore, imageName string)
|
||||
// using a loop variable called tag would be reassigned after each iteration, using the same memory address
|
||||
// directly access the value at the current index in the slice as ImageInfo requires pointers to tag fields
|
||||
tag := tagsInfo[i]
|
||||
|
||||
digest := godigest.Digest(tag.Digest)
|
||||
digest := tag.Digest
|
||||
|
||||
manifest, err := layoutUtils.GetImageBlobManifest(repo, digest)
|
||||
if err != nil {
|
||||
@ -568,13 +566,14 @@ func (r *queryResolver) getImageList(store storage.ImageStore, imageName string)
|
||||
}
|
||||
|
||||
func BuildImageInfo(repo string, tag string, manifestDigest godigest.Digest,
|
||||
manifest v1.Manifest, imageConfig ispec.Image, isSigned bool,
|
||||
manifest ispec.Manifest, imageConfig ispec.Image, isSigned bool,
|
||||
) *gql_generated.ImageSummary {
|
||||
layers := []*gql_generated.LayerSummary{}
|
||||
size := int64(0)
|
||||
log := log.NewLogger("debug", "")
|
||||
allHistory := []*gql_generated.LayerHistory{}
|
||||
formattedManifestDigest := manifestDigest.Hex()
|
||||
formattedManifestDigest := manifestDigest.String()
|
||||
formattedConfigDigest := manifest.Config.Digest.String()
|
||||
annotations := common.GetAnnotations(manifest.Annotations, imageConfig.Config.Labels)
|
||||
lastUpdated := common.GetImageLastUpdated(imageConfig)
|
||||
|
||||
@ -582,7 +581,7 @@ func BuildImageInfo(repo string, tag string, manifestDigest godigest.Digest,
|
||||
if len(history) == 0 {
|
||||
for _, layer := range manifest.Layers {
|
||||
size += layer.Size
|
||||
digest := layer.Digest.Hex
|
||||
digest := layer.Digest.String()
|
||||
layerSize := strconv.FormatInt(layer.Size, 10)
|
||||
|
||||
layer := &gql_generated.LayerSummary{
|
||||
@ -607,7 +606,7 @@ func BuildImageInfo(repo string, tag string, manifestDigest godigest.Digest,
|
||||
RepoName: &repo,
|
||||
Tag: &tag,
|
||||
Digest: &formattedManifestDigest,
|
||||
ConfigDigest: &manifest.Config.Digest.Hex,
|
||||
ConfigDigest: &formattedConfigDigest,
|
||||
Size: &formattedSize,
|
||||
Layers: layers,
|
||||
History: allHistory,
|
||||
@ -656,7 +655,7 @@ func BuildImageInfo(repo string, tag string, manifestDigest godigest.Digest,
|
||||
RepoName: &repo,
|
||||
Tag: &tag,
|
||||
Digest: &formattedManifestDigest,
|
||||
ConfigDigest: &manifest.Config.Digest.Hex,
|
||||
ConfigDigest: &formattedConfigDigest,
|
||||
Size: &formattedSize,
|
||||
Layers: layers,
|
||||
History: allHistory,
|
||||
@ -677,7 +676,7 @@ func BuildImageInfo(repo string, tag string, manifestDigest godigest.Digest,
|
||||
}
|
||||
|
||||
size += manifest.Layers[layersIterator].Size
|
||||
digest := manifest.Layers[layersIterator].Digest.Hex
|
||||
digest := manifest.Layers[layersIterator].Digest.String()
|
||||
layerSize := strconv.FormatInt(manifest.Layers[layersIterator].Size, 10)
|
||||
|
||||
layer := &gql_generated.LayerSummary{
|
||||
@ -701,7 +700,7 @@ func BuildImageInfo(repo string, tag string, manifestDigest godigest.Digest,
|
||||
RepoName: &repo,
|
||||
Tag: &tag,
|
||||
Digest: &formattedManifestDigest,
|
||||
ConfigDigest: &manifest.Config.Digest.Hex,
|
||||
ConfigDigest: &formattedConfigDigest,
|
||||
Size: &formattedSize,
|
||||
Layers: layers,
|
||||
History: allHistory,
|
||||
@ -772,7 +771,7 @@ func extractImageDetails(
|
||||
layoutUtils common.OciLayoutUtils,
|
||||
repo, tag string,
|
||||
log log.Logger) (
|
||||
godigest.Digest, *v1.Manifest, *ispec.Image, error,
|
||||
godigest.Digest, *ispec.Manifest, *ispec.Image, error,
|
||||
) {
|
||||
validRepoList, err := userAvailableRepos(ctx, []string{repo})
|
||||
if err != nil {
|
||||
@ -787,21 +786,14 @@ func extractImageDetails(
|
||||
return "", nil, nil, errors.ErrUnauthorizedAccess
|
||||
}
|
||||
|
||||
_, dig, err := layoutUtils.GetImageManifest(repo, tag)
|
||||
manifest, dig, err := layoutUtils.GetImageManifest(repo, tag)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Could not retrieve image ispec manifest")
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
digest := godigest.Digest(dig)
|
||||
|
||||
manifest, err := layoutUtils.GetImageBlobManifest(repo, digest)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Could not retrieve image godigest manifest")
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
digest := dig
|
||||
|
||||
imageConfig, err := layoutUtils.GetImageConfigInfo(repo, digest)
|
||||
if err != nil {
|
||||
|
@ -9,7 +9,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/99designs/gqlgen/graphql"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
@ -73,8 +72,8 @@ func TestGlobalSearch(t *testing.T) {
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return v1.Manifest{}, ErrTestError
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (ispec.Manifest, error) {
|
||||
return ispec.Manifest{}, ErrTestError
|
||||
},
|
||||
}
|
||||
mockCve := mocks.CveInfoMock{}
|
||||
@ -116,12 +115,12 @@ func TestGlobalSearch(t *testing.T) {
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return v1.Manifest{
|
||||
Layers: []v1.Descriptor{
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (ispec.Manifest, error) {
|
||||
return ispec.Manifest{
|
||||
Layers: []ispec.Descriptor{
|
||||
{
|
||||
Size: 0,
|
||||
Digest: v1.Hash{},
|
||||
Digest: godigest.FromString(""),
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
@ -210,8 +209,8 @@ func TestRepoListWithNewestImage(t *testing.T) {
|
||||
|
||||
Convey("GetImageBlobManifest fail", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return v1.Manifest{}, ErrTestError
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (ispec.Manifest, error) {
|
||||
return ispec.Manifest{}, ErrTestError
|
||||
},
|
||||
GetImageManifestsFn: func(image string) ([]ispec.Descriptor, error) {
|
||||
return []ispec.Descriptor{
|
||||
@ -358,38 +357,18 @@ func TestExtractImageDetails(t *testing.T) {
|
||||
}
|
||||
localTestDigestTry, _ := json.Marshal(localTestManifest)
|
||||
localTestDigest := godigest.FromBytes(localTestDigestTry)
|
||||
localTestManifestV1 := v1.Manifest{
|
||||
Config: v1.Descriptor{
|
||||
Digest: v1.Hash{
|
||||
Algorithm: "sha256",
|
||||
Hex: configDigest.Encoded(),
|
||||
},
|
||||
},
|
||||
Layers: []v1.Descriptor{
|
||||
{
|
||||
Size: 4,
|
||||
Digest: v1.Hash{
|
||||
Algorithm: "sha256",
|
||||
Hex: layerDigest.Encoded(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
Convey("extractImageDetails good workflow", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return localTestManifestV1, nil
|
||||
},
|
||||
GetImageConfigInfoFn: func(repo string, digest godigest.Digest) (
|
||||
ispec.Image, error,
|
||||
) {
|
||||
return config, nil
|
||||
},
|
||||
GetImageManifestFn: func(repo string, tag string) (
|
||||
ispec.Manifest, string, error,
|
||||
ispec.Manifest, godigest.Digest, error,
|
||||
) {
|
||||
return localTestManifest, localTestDigest.String(), nil
|
||||
return localTestManifest, localTestDigest, nil
|
||||
},
|
||||
}
|
||||
resDigest, resManifest, resIspecImage, resErr := extractImageDetails(ctx,
|
||||
@ -403,19 +382,15 @@ func TestExtractImageDetails(t *testing.T) {
|
||||
|
||||
Convey("extractImageDetails bad ispec.ImageManifest", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return localTestManifestV1, nil
|
||||
},
|
||||
GetImageConfigInfoFn: func(repo string, digest godigest.Digest) (
|
||||
ispec.Image, error,
|
||||
) {
|
||||
return config, nil
|
||||
},
|
||||
GetImageManifestFn: func(repo string, tag string) (
|
||||
ispec.Manifest, string, error,
|
||||
ispec.Manifest, godigest.Digest, error,
|
||||
) {
|
||||
// localTestManifest = nil
|
||||
return ispec.Manifest{}, localTestDigest.String() + "aaa", ErrTestError
|
||||
return ispec.Manifest{}, localTestDigest, ErrTestError
|
||||
},
|
||||
}
|
||||
resDigest, resManifest, resIspecImage, resErr := extractImageDetails(ctx,
|
||||
@ -427,45 +402,17 @@ func TestExtractImageDetails(t *testing.T) {
|
||||
So(resIspecImage, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("extractImageDetails bad ImageBlobManifest", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return localTestManifestV1, ErrTestError
|
||||
},
|
||||
GetImageConfigInfoFn: func(repo string, digest godigest.Digest) (
|
||||
ispec.Image, error,
|
||||
) {
|
||||
return config, nil
|
||||
},
|
||||
GetImageManifestFn: func(repo string, tag string) (
|
||||
ispec.Manifest, string, error,
|
||||
) {
|
||||
return localTestManifest, localTestDigest.String(), nil
|
||||
},
|
||||
}
|
||||
resDigest, resManifest, resIspecImage, resErr := extractImageDetails(ctx,
|
||||
mockOlum, "zot-test", "latest", testLogger)
|
||||
So(string(resDigest), ShouldEqual, "")
|
||||
So(resManifest, ShouldBeNil)
|
||||
|
||||
So(resIspecImage, ShouldBeNil)
|
||||
So(resErr, ShouldEqual, ErrTestError)
|
||||
})
|
||||
|
||||
Convey("extractImageDetails bad imageConfig", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return localTestManifestV1, nil
|
||||
},
|
||||
GetImageConfigInfoFn: func(repo string, digest godigest.Digest) (
|
||||
ispec.Image, error,
|
||||
) {
|
||||
return config, nil
|
||||
},
|
||||
GetImageManifestFn: func(repo string, tag string) (
|
||||
ispec.Manifest, string, error,
|
||||
ispec.Manifest, godigest.Digest, error,
|
||||
) {
|
||||
return localTestManifest, localTestDigest.String(), ErrTestError
|
||||
return localTestManifest, localTestDigest, ErrTestError
|
||||
},
|
||||
}
|
||||
resDigest, resManifest, resIspecImage, resErr := extractImageDetails(ctx,
|
||||
@ -484,18 +431,15 @@ func TestExtractImageDetails(t *testing.T) {
|
||||
Username: "jane_doe",
|
||||
})
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return localTestManifestV1, nil
|
||||
},
|
||||
GetImageConfigInfoFn: func(repo string, digest godigest.Digest) (
|
||||
ispec.Image, error,
|
||||
) {
|
||||
return config, nil
|
||||
},
|
||||
GetImageManifestFn: func(repo string, tag string) (
|
||||
ispec.Manifest, string, error,
|
||||
ispec.Manifest, godigest.Digest, error,
|
||||
) {
|
||||
return localTestManifest, localTestDigest.String(), ErrTestError
|
||||
return localTestManifest, localTestDigest, ErrTestError
|
||||
},
|
||||
}
|
||||
resDigest, resManifest, resIspecImage, resErr := extractImageDetails(ctx,
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
"zotregistry.io/zot/pkg/extensions/search/common"
|
||||
"zotregistry.io/zot/pkg/extensions/search/gql_generated"
|
||||
)
|
||||
@ -114,11 +113,11 @@ func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, id string, im
|
||||
}
|
||||
|
||||
for _, tag := range tagsInfo {
|
||||
digest := godigest.Digest(tag.Digest)
|
||||
digest := tag.Digest
|
||||
|
||||
manifest, err := olu.GetImageBlobManifest(image, digest)
|
||||
if err != nil {
|
||||
r.log.Error().Err(err).Str("repo", image).Str("digest", tag.Digest).
|
||||
r.log.Error().Err(err).Str("repo", image).Str("digest", tag.Digest.String()).
|
||||
Msg("extension api: error reading manifest")
|
||||
|
||||
return unaffectedImages, err
|
||||
@ -331,10 +330,17 @@ func (r *queryResolver) ExpandedRepoInfo(ctx context.Context, repo string) (*gql
|
||||
for _, image := range origRepoInfo.ImageSummaries {
|
||||
tag := image.Tag
|
||||
digest := image.Digest
|
||||
configDigest := image.ConfigDigest
|
||||
isSigned := image.IsSigned
|
||||
size := image.Size
|
||||
|
||||
imageSummary := &gql_generated.ImageSummary{Tag: &tag, Digest: &digest, IsSigned: &isSigned, RepoName: &repo}
|
||||
imageSummary := &gql_generated.ImageSummary{
|
||||
Tag: &tag,
|
||||
Digest: &digest,
|
||||
ConfigDigest: &configDigest,
|
||||
IsSigned: &isSigned,
|
||||
RepoName: &repo,
|
||||
}
|
||||
|
||||
layers := make([]*gql_generated.LayerSummary, 0)
|
||||
|
||||
@ -445,7 +451,7 @@ func (r *queryResolver) DerivedImageList(ctx context.Context, image string) ([]*

for _, l := range imageLayers {
for _, k := range layers {
if *k.Digest == l.Digest.Encoded() {
if *k.Digest == l.Digest.String() {
sameLayer++
}
}
@ -517,7 +523,7 @@ func (r *queryResolver) BaseImageList(ctx context.Context, image string) ([]*gql
foundLayer := false

for _, k := range imageLayers {
if *l.Digest == k.Digest.Encoded() {
if *l.Digest == k.Digest.String() {
foundLayer = true

break
|
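The comparisons above move from Encoded() to String(), which suggests the stored layer digests now carry the full algorithm:hex form rather than the bare hex. For reference, a small example of how the parts of a godigest.Digest relate (the digest value is illustrative):

package main

import (
	"fmt"
	"path"

	godigest "github.com/opencontainers/go-digest"
)

func main() {
	// an arbitrary digest, just for illustration
	dgst := godigest.FromString("some layer content")

	fmt.Println(dgst.String())    // sha256:<64 hex chars>, the canonical form
	fmt.Println(dgst.Algorithm()) // sha256
	fmt.Println(dgst.Encoded())   // <64 hex chars> only, the old Hex() value

	// blob paths on disk are assembled from the two halves, as in the storage tests
	blobPath := path.Join("rootDir", "repo", "blobs", dgst.Algorithm().String(), dgst.Encoded())
	fmt.Println(blobPath)
}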
@ -9,6 +9,7 @@ import (
|
||||
"strings"
|
||||
|
||||
notreg "github.com/notaryproject/notation-go/registry"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
|
||||
"github.com/sigstore/cosign/pkg/oci/remote"
|
||||
@ -38,10 +39,10 @@ func newSignaturesCopier(httpClient *resty.Client, upstreamURL url.URL,
|
||||
}
|
||||
}
|
||||
|
||||
func (sig *signaturesCopier) getCosignManifest(repo, digest string) (*ispec.Manifest, error) {
|
||||
func (sig *signaturesCopier) getCosignManifest(repo, digestStr string) (*ispec.Manifest, error) {
|
||||
var cosignManifest ispec.Manifest
|
||||
|
||||
cosignTag := getCosignTagFromImageDigest(digest)
|
||||
cosignTag := getCosignTagFromImageDigest(digestStr)
|
||||
|
||||
getCosignManifestURL := sig.upstreamURL
|
||||
|
||||
@ -85,14 +86,14 @@ func (sig *signaturesCopier) getCosignManifest(repo, digest string) (*ispec.Mani
|
||||
return &cosignManifest, nil
|
||||
}
|
||||
|
||||
func (sig *signaturesCopier) getNotaryRefs(repo, digest string) (ReferenceList, error) {
|
||||
func (sig *signaturesCopier) getNotaryRefs(repo, digestStr string) (ReferenceList, error) {
|
||||
var referrers ReferenceList
|
||||
|
||||
getReferrersURL := sig.upstreamURL
|
||||
|
||||
// based on manifest digest get referrers
|
||||
getReferrersURL.Path = path.Join(getReferrersURL.Path, constants.ArtifactSpecRoutePrefix,
|
||||
repo, "manifests", digest, "referrers")
|
||||
repo, "manifests", digestStr, "referrers")
|
||||
|
||||
getReferrersURL.RawQuery = getReferrersURL.Query().Encode()
|
||||
|
||||
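The referrers lookup joins the upstream URL path with the artifact-spec route, the repository, the manifest digest and "referrers", then re-encodes the query string. A rough sketch of that construction (the route prefix value is an assumption standing in for constants.ArtifactSpecRoutePrefix):

package main

import (
	"fmt"
	"net/url"
	"path"

	godigest "github.com/opencontainers/go-digest"
)

// artifactSpecRoutePrefix stands in for constants.ArtifactSpecRoutePrefix.
const artifactSpecRoutePrefix = "/oras/artifacts/v1"

func referrersURL(upstream url.URL, repo string, digest godigest.Digest) string {
	// based on the manifest digest, ask the upstream registry for referrers
	upstream.Path = path.Join(upstream.Path, artifactSpecRoutePrefix, repo, "manifests", digest.String(), "referrers")
	upstream.RawQuery = upstream.Query().Encode()

	return upstream.String()
}

func main() {
	base, _ := url.Parse("http://upstream:5000")
	dgst := godigest.FromString("manifest bytes")

	fmt.Println(referrersURL(*base, "zot-test", dgst))
}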
@ -132,19 +133,20 @@ func (sig *signaturesCopier) getNotaryRefs(repo, digest string) (ReferenceList,
|
||||
return referrers, nil
|
||||
}
|
||||
|
||||
func (sig *signaturesCopier) syncCosignSignature(localRepo, remoteRepo, digest string, cosignManifest *ispec.Manifest,
|
||||
func (sig *signaturesCopier) syncCosignSignature(localRepo, remoteRepo, digestStr string,
|
||||
cosignManifest *ispec.Manifest,
|
||||
) error {
|
||||
cosignTag := getCosignTagFromImageDigest(digest)
|
||||
cosignTag := getCosignTagFromImageDigest(digestStr)
|
||||
|
||||
// if no manifest found
|
||||
if cosignManifest == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
skipCosignSig, err := sig.canSkipCosignSignature(localRepo, digest, cosignManifest)
|
||||
skipCosignSig, err := sig.canSkipCosignSignature(localRepo, digestStr, cosignManifest)
|
||||
if err != nil {
|
||||
sig.log.Error().Err(err).Msgf("couldn't check if the upstream image %s:%s cosign signature can be skipped",
|
||||
remoteRepo, digest)
|
||||
remoteRepo, digestStr)
|
||||
}
|
||||
|
||||
if skipCosignSig {
|
||||
@ -178,7 +180,7 @@ func (sig *signaturesCopier) syncCosignSignature(localRepo, remoteRepo, digest s
|
||||
defer resp.RawBody().Close()
|
||||
|
||||
// push blob
|
||||
_, _, err = imageStore.FullBlobUpload(localRepo, resp.RawBody(), blob.Digest.String())
|
||||
_, _, err = imageStore.FullBlobUpload(localRepo, resp.RawBody(), blob.Digest)
|
||||
if err != nil {
|
||||
sig.log.Error().Str("errorType", TypeOf(err)).
|
||||
Err(err).Msg("couldn't upload cosign blob")
|
||||
@ -210,7 +212,7 @@ func (sig *signaturesCopier) syncCosignSignature(localRepo, remoteRepo, digest s
|
||||
defer resp.RawBody().Close()
|
||||
|
||||
// push config blob
|
||||
_, _, err = imageStore.FullBlobUpload(localRepo, resp.RawBody(), cosignManifest.Config.Digest.String())
|
||||
_, _, err = imageStore.FullBlobUpload(localRepo, resp.RawBody(), cosignManifest.Config.Digest)
|
||||
if err != nil {
|
||||
sig.log.Error().Str("errorType", TypeOf(err)).
|
||||
Err(err).Msg("couldn't upload cosign config blob")
|
||||
@ -234,21 +236,21 @@ func (sig *signaturesCopier) syncCosignSignature(localRepo, remoteRepo, digest s
|
||||
return err
|
||||
}
|
||||
|
||||
sig.log.Info().Msgf("successfully synced cosign signature for repo %s digest %s", localRepo, digest)
|
||||
sig.log.Info().Msgf("successfully synced cosign signature for repo %s digest %s", localRepo, digestStr)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sig *signaturesCopier) syncNotarySignature(localRepo, remoteRepo, digest string, referrers ReferenceList,
|
||||
func (sig *signaturesCopier) syncNotarySignature(localRepo, remoteRepo, digestStr string, referrers ReferenceList,
|
||||
) error {
|
||||
if len(referrers.References) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
skipNotarySig, err := sig.canSkipNotarySignature(localRepo, digest, referrers)
|
||||
skipNotarySig, err := sig.canSkipNotarySignature(localRepo, digestStr, referrers)
|
||||
if skipNotarySig || err != nil {
|
||||
sig.log.Error().Err(err).Msgf("couldn't check if the upstream image %s:%s notary signature can be skipped",
|
||||
remoteRepo, digest)
|
||||
remoteRepo, digestStr)
|
||||
}
|
||||
|
||||
if skipNotarySig {
|
||||
@ -307,7 +309,7 @@ func (sig *signaturesCopier) syncNotarySignature(localRepo, remoteRepo, digest s
|
||||
return zerr.ErrSyncSignature
|
||||
}
|
||||
|
||||
_, _, err = imageStore.FullBlobUpload(localRepo, resp.RawBody(), blob.Digest.String())
|
||||
_, _, err = imageStore.FullBlobUpload(localRepo, resp.RawBody(), blob.Digest)
|
||||
if err != nil {
|
||||
sig.log.Error().Str("errorType", TypeOf(err)).
|
||||
Err(err).Msg("couldn't upload notary sig blob")
|
||||
@ -326,14 +328,15 @@ func (sig *signaturesCopier) syncNotarySignature(localRepo, remoteRepo, digest s
|
||||
}
|
||||
}
|
||||
|
||||
sig.log.Info().Msgf("successfully synced notary signature for repo %s digest %s", localRepo, digest)
|
||||
sig.log.Info().Msgf("successfully synced notary signature for repo %s digest %s", localRepo, digestStr)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sig *signaturesCopier) canSkipNotarySignature(localRepo, digest string, refs ReferenceList,
|
||||
func (sig *signaturesCopier) canSkipNotarySignature(localRepo, digestStr string, refs ReferenceList,
|
||||
) (bool, error) {
|
||||
imageStore := sig.storeController.GetImageStore(localRepo)
|
||||
digest := godigest.Digest(digestStr)
|
||||
|
||||
// check notary signature already synced
|
||||
if len(refs.References) > 0 {
|
||||
@ -344,24 +347,24 @@ func (sig *signaturesCopier) canSkipNotarySignature(localRepo, digest string, re
|
||||
}
|
||||
|
||||
sig.log.Error().Str("errorType", TypeOf(err)).
|
||||
Err(err).Msgf("couldn't get local notary signature %s:%s manifest", localRepo, digest)
|
||||
Err(err).Msgf("couldn't get local notary signature %s:%s manifest", localRepo, digestStr)
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !artifactDescriptorsEqual(localRefs, refs.References) {
|
||||
sig.log.Info().Msgf("upstream notary signatures %s:%s changed, syncing again", localRepo, digest)
|
||||
sig.log.Info().Msgf("upstream notary signatures %s:%s changed, syncing again", localRepo, digestStr)
|
||||
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
sig.log.Info().Msgf("skipping notary signature %s:%s, already synced", localRepo, digest)
|
||||
sig.log.Info().Msgf("skipping notary signature %s:%s, already synced", localRepo, digestStr)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (sig *signaturesCopier) canSkipCosignSignature(localRepo, digest string, cosignManifest *ispec.Manifest,
|
||||
func (sig *signaturesCopier) canSkipCosignSignature(localRepo, digestStr string, cosignManifest *ispec.Manifest,
|
||||
) (bool, error) {
|
||||
imageStore := sig.storeController.GetImageStore(localRepo)
|
||||
// check cosign signature already synced
|
||||
@ -370,7 +373,7 @@ func (sig *signaturesCopier) canSkipCosignSignature(localRepo, digest string, co
|
||||
|
||||
/* we need to use tag (cosign format: sha256-$IMAGE_TAG.sig) instead of digest to get local cosign manifest
|
||||
because of an issue where cosign digests differs between upstream and downstream */
|
||||
cosignManifestTag := getCosignTagFromImageDigest(digest)
|
||||
cosignManifestTag := getCosignTagFromImageDigest(digestStr)
|
||||
|
||||
localCosignManifestBuf, _, _, err := imageStore.GetImageManifest(localRepo, cosignManifestTag)
|
||||
if err != nil {
|
||||
@ -379,7 +382,7 @@ func (sig *signaturesCopier) canSkipCosignSignature(localRepo, digest string, co
|
||||
}
|
||||
|
||||
sig.log.Error().Str("errorType", TypeOf(err)).
|
||||
Err(err).Msgf("couldn't get local cosign %s:%s manifest", localRepo, digest)
|
||||
Err(err).Msgf("couldn't get local cosign %s:%s manifest", localRepo, digestStr)
|
||||
|
||||
return false, err
|
||||
}
|
||||
@ -387,19 +390,19 @@ func (sig *signaturesCopier) canSkipCosignSignature(localRepo, digest string, co
|
||||
err = json.Unmarshal(localCosignManifestBuf, &localCosignManifest)
|
||||
if err != nil {
|
||||
sig.log.Error().Str("errorType", TypeOf(err)).
|
||||
Err(err).Msgf("couldn't unmarshal local cosign signature %s:%s manifest", localRepo, digest)
|
||||
Err(err).Msgf("couldn't unmarshal local cosign signature %s:%s manifest", localRepo, digestStr)
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !manifestsEqual(localCosignManifest, *cosignManifest) {
|
||||
sig.log.Info().Msgf("upstream cosign signatures %s:%s changed, syncing again", localRepo, digest)
|
||||
sig.log.Info().Msgf("upstream cosign signatures %s:%s changed, syncing again", localRepo, digestStr)
|
||||
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
sig.log.Info().Msgf("skipping cosign signature %s:%s, already synced", localRepo, digest)
|
||||
sig.log.Info().Msgf("skipping cosign signature %s:%s, already synced", localRepo, digestStr)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
@ -414,10 +417,10 @@ func isCosignTag(tag string) bool {
return false
}

func getCosignTagFromImageDigest(digest string) string {
if !isCosignTag(digest) {
return strings.Replace(digest, ":", "-", 1) + "." + remote.SignatureTagSuffix
func getCosignTagFromImageDigest(digestStr string) string {
if !isCosignTag(digestStr) {
return strings.Replace(digestStr, ":", "-", 1) + "." + remote.SignatureTagSuffix
}

return digest
return digestStr
}
|
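getCosignTagFromImageDigest maps an image digest such as sha256:abc... to the cosign tag sha256-abc....sig by swapping the first colon for a dash and appending the signature suffix. A standalone sketch of that mapping (the suffix constant here mirrors cosign's remote.SignatureTagSuffix, assumed to be "sig"):

package main

import (
	"fmt"
	"strings"

	godigest "github.com/opencontainers/go-digest"
)

// signatureTagSuffix mirrors remote.SignatureTagSuffix from the cosign library.
const signatureTagSuffix = "sig"

func cosignTagFromDigest(digestStr string) string {
	// sha256:deadbeef... -> sha256-deadbeef....sig
	return strings.Replace(digestStr, ":", "-", 1) + "." + signatureTagSuffix
}

func main() {
	dgst := godigest.FromString("manifest bytes")
	fmt.Println(cosignTagFromDigest(dgst.String()))
}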
@ -336,7 +336,7 @@ func syncRegistry(ctx context.Context, regCfg RegistryConfig,
|
||||
|
||||
tag := getTagFromRef(upstreamImageRef, log).Tag()
|
||||
|
||||
skipImage, err := canSkipImage(localRepo, tag, upstreamImageDigest.String(), imageStore, log)
|
||||
skipImage, err := canSkipImage(localRepo, tag, upstreamImageDigest, imageStore, log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("couldn't check if the upstream image %s can be skipped",
|
||||
upstreamImageRef.DockerReference())
|
||||
|
@ -374,14 +374,14 @@ func TestSyncInternal(t *testing.T) {
|
||||
|
||||
sig := newSignaturesCopier(resty.New(), *regURL, storage.StoreController{DefaultStore: imageStore}, log)
|
||||
|
||||
canBeSkipped, err = sig.canSkipNotarySignature(testImage, testImageManifestDigest, refs)
|
||||
canBeSkipped, err = sig.canSkipNotarySignature(testImage, testImageManifestDigest.String(), refs)
|
||||
So(err, ShouldBeNil)
|
||||
So(canBeSkipped, ShouldBeFalse)
|
||||
|
||||
err = os.Chmod(path.Join(imageStore.RootDir(), testImage, "index.json"), 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
canBeSkipped, err = sig.canSkipNotarySignature(testImage, testImageManifestDigest, refs)
|
||||
canBeSkipped, err = sig.canSkipNotarySignature(testImage, testImageManifestDigest.String(), refs)
|
||||
So(err, ShouldNotBeNil)
|
||||
So(canBeSkipped, ShouldBeFalse)
|
||||
|
||||
@ -392,7 +392,7 @@ func TestSyncInternal(t *testing.T) {
|
||||
err = os.Chmod(path.Join(imageStore.RootDir(), testImage, "index.json"), 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
canBeSkipped, err = sig.canSkipCosignSignature(testImage, testImageManifestDigest, &cosignManifest)
|
||||
canBeSkipped, err = sig.canSkipCosignSignature(testImage, testImageManifestDigest.String(), &cosignManifest)
|
||||
So(err, ShouldBeNil)
|
||||
So(canBeSkipped, ShouldBeFalse)
|
||||
})
|
||||
@ -480,7 +480,7 @@ func TestSyncInternal(t *testing.T) {
|
||||
|
||||
for _, layer := range layers {
|
||||
// upload layer
|
||||
_, _, err := testImageStore.FullBlobUpload(repo, bytes.NewReader(layer), godigest.FromBytes(layer).String())
|
||||
_, _, err := testImageStore.FullBlobUpload(repo, bytes.NewReader(layer), godigest.FromBytes(layer))
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
|
||||
@ -489,7 +489,7 @@ func TestSyncInternal(t *testing.T) {
|
||||
|
||||
configDigest := godigest.FromBytes(configContent)
|
||||
|
||||
_, _, err = testImageStore.FullBlobUpload(repo, bytes.NewReader(configContent), configDigest.String())
|
||||
_, _, err = testImageStore.FullBlobUpload(repo, bytes.NewReader(configContent), configDigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestContent, err := json.Marshal(manifest)
|
||||
@ -612,7 +612,7 @@ func TestSyncInternal(t *testing.T) {
|
||||
}
|
||||
|
||||
if err := os.Chmod(path.Join(testRootDir, testImage, "blobs", "sha256",
|
||||
manifest.Layers[0].Digest.Hex()), 0o000); err != nil {
|
||||
manifest.Layers[0].Digest.Encoded()), 0o000); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@ -620,12 +620,12 @@ func TestSyncInternal(t *testing.T) {
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
if err := os.Chmod(path.Join(testRootDir, testImage, "blobs", "sha256",
|
||||
manifest.Layers[0].Digest.Hex()), 0o755); err != nil {
|
||||
manifest.Layers[0].Digest.Encoded()), 0o755); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cachedManifestConfigPath := path.Join(imageStore.RootDir(), testImage, SyncBlobUploadDir,
|
||||
testImage, "blobs", "sha256", manifest.Config.Digest.Hex())
|
||||
testImage, "blobs", "sha256", manifest.Config.Digest.Encoded())
|
||||
if err := os.Chmod(cachedManifestConfigPath, 0o000); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -2356,7 +2356,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
|
||||
Convey("Trigger error on image manifest", func() {
|
||||
// trigger permission denied on image manifest
|
||||
manifestPath := path.Join(srcDir, repoName, "blobs",
|
||||
string(imageManifestDigest.Algorithm()), imageManifestDigest.Hex())
|
||||
string(imageManifestDigest.Algorithm()), imageManifestDigest.Encoded())
|
||||
err = os.Chmod(manifestPath, 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
@ -2373,7 +2373,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
|
||||
|
||||
Convey("Trigger error on cosign signature", func() {
|
||||
// trigger permission error on cosign signature on upstream
|
||||
cosignTag := string(imageManifestDigest.Algorithm()) + "-" + imageManifestDigest.Hex() +
|
||||
cosignTag := string(imageManifestDigest.Algorithm()) + "-" + imageManifestDigest.Encoded() +
|
||||
"." + remote.SignatureTagSuffix
|
||||
|
||||
getCosignManifestURL := srcBaseURL + path.Join(constants.RoutePrefix, repoName, "manifests", cosignTag)
|
||||
@ -2386,7 +2386,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
for _, blob := range cm.Layers {
|
||||
blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
|
||||
blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Encoded())
|
||||
err := os.Chmod(blobPath, 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
@ -2424,7 +2424,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
|
||||
// read manifest
|
||||
var artifactManifest artifactspec.Manifest
|
||||
for _, ref := range referrers.References {
|
||||
refPath := path.Join(srcDir, repoName, "blobs", string(ref.Digest.Algorithm()), ref.Digest.Hex())
|
||||
refPath := path.Join(srcDir, repoName, "blobs", string(ref.Digest.Algorithm()), ref.Digest.Encoded())
|
||||
body, err := os.ReadFile(refPath)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
@ -2433,7 +2433,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
|
||||
|
||||
// triggers perm denied on sig blobs
|
||||
for _, blob := range artifactManifest.Blobs {
|
||||
blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
|
||||
blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Encoded())
|
||||
err := os.Chmod(blobPath, 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
@ -2592,7 +2592,7 @@ func TestSignatures(t *testing.T) {
|
||||
|
||||
var artifactManifest artifactspec.Manifest
|
||||
for _, ref := range referrers.References {
|
||||
refPath := path.Join(srcDir, repoName, "blobs", string(ref.Digest.Algorithm()), ref.Digest.Hex())
|
||||
refPath := path.Join(srcDir, repoName, "blobs", string(ref.Digest.Algorithm()), ref.Digest.Encoded())
|
||||
body, err := os.ReadFile(refPath)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
@ -2601,7 +2601,7 @@ func TestSignatures(t *testing.T) {
|
||||
|
||||
// triggers perm denied on notary sig blobs on downstream
|
||||
for _, blob := range artifactManifest.Blobs {
|
||||
blobPath := path.Join(destDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
|
||||
blobPath := path.Join(destDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Encoded())
|
||||
err := os.MkdirAll(blobPath, 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
err = os.Chmod(blobPath, 0o000)
|
||||
@ -2620,7 +2620,7 @@ func TestSignatures(t *testing.T) {
|
||||
|
||||
// triggers perm denied on notary manifest on downstream
|
||||
for _, ref := range referrers.References {
|
||||
refPath := path.Join(destDir, repoName, "blobs", string(ref.Digest.Algorithm()), ref.Digest.Hex())
|
||||
refPath := path.Join(destDir, repoName, "blobs", string(ref.Digest.Algorithm()), ref.Digest.Encoded())
|
||||
err := os.MkdirAll(refPath, 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
err = os.Chmod(refPath, 0o000)
|
||||
@ -2634,7 +2634,7 @@ func TestSignatures(t *testing.T) {
|
||||
|
||||
// triggers perm denied on sig blobs
|
||||
for _, blob := range artifactManifest.Blobs {
|
||||
blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
|
||||
blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Encoded())
|
||||
err := os.Chmod(blobPath, 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
@ -2679,7 +2679,7 @@ func TestSignatures(t *testing.T) {
|
||||
cosignManifestDigest := godigest.FromBytes(buf)
|
||||
|
||||
for _, blob := range imageManifest.Layers {
|
||||
blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
|
||||
blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Encoded())
|
||||
err := os.Chmod(blobPath, 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
@ -2698,11 +2698,11 @@ func TestSignatures(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
for _, blob := range imageManifest.Layers {
|
||||
srcBlobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
|
||||
srcBlobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Encoded())
|
||||
err := os.Chmod(srcBlobPath, 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
destBlobPath := path.Join(destDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
|
||||
destBlobPath := path.Join(destDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Encoded())
|
||||
err = os.MkdirAll(destBlobPath, 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
err = os.Chmod(destBlobPath, 0o755)
|
||||
@ -2715,7 +2715,7 @@ func TestSignatures(t *testing.T) {
|
||||
So(resp.StatusCode(), ShouldEqual, 200)
|
||||
|
||||
for _, blob := range imageManifest.Layers {
|
||||
destBlobPath := path.Join(destDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
|
||||
destBlobPath := path.Join(destDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Encoded())
|
||||
err = os.Chmod(destBlobPath, 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
err = os.Remove(destBlobPath)
|
||||
@ -2724,7 +2724,7 @@ func TestSignatures(t *testing.T) {
|
||||
|
||||
// trigger error on upstream config blob
|
||||
srcConfigBlobPath := path.Join(srcDir, repoName, "blobs", string(imageManifest.Config.Digest.Algorithm()),
|
||||
imageManifest.Config.Digest.Hex())
|
||||
imageManifest.Config.Digest.Encoded())
|
||||
err = os.Chmod(srcConfigBlobPath, 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
@ -2746,7 +2746,7 @@ func TestSignatures(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
destConfigBlobPath := path.Join(destDir, repoName, "blobs", string(imageManifest.Config.Digest.Algorithm()),
|
||||
imageManifest.Config.Digest.Hex())
|
||||
imageManifest.Config.Digest.Encoded())
|
||||
|
||||
err = os.MkdirAll(destConfigBlobPath, 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
@ -2764,7 +2764,7 @@ func TestSignatures(t *testing.T) {
|
||||
|
||||
// trigger error on downstream manifest
|
||||
destManifestPath := path.Join(destDir, repoName, "blobs", string(cosignManifestDigest.Algorithm()),
|
||||
cosignManifestDigest.Hex())
|
||||
cosignManifestDigest.Encoded())
|
||||
err = os.MkdirAll(destManifestPath, 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
err = os.Chmod(destManifestPath, 0o000)
|
||||
@ -3277,7 +3277,7 @@ func TestSignaturesOnDemand(t *testing.T) {
|
||||
// trigger errors on cosign blobs
|
||||
// trigger error on cosign config blob
|
||||
srcConfigBlobPath := path.Join(srcDir, repoName, "blobs", string(imageManifest.Config.Digest.Algorithm()),
|
||||
imageManifest.Config.Digest.Hex())
|
||||
imageManifest.Config.Digest.Encoded())
|
||||
err = os.Chmod(srcConfigBlobPath, 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
@ -3292,7 +3292,7 @@ func TestSignaturesOnDemand(t *testing.T) {
|
||||
|
||||
// trigger error on cosign layer blob
|
||||
srcSignatureBlobPath := path.Join(srcDir, repoName, "blobs", string(imageManifest.Layers[0].Digest.Algorithm()),
|
||||
imageManifest.Layers[0].Digest.Hex())
|
||||
imageManifest.Layers[0].Digest.Encoded())
|
||||
|
||||
err = os.Chmod(srcConfigBlobPath, 0o755)
|
||||
So(err, ShouldBeNil)
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
"github.com/containers/image/v5/oci/layout"
|
||||
"github.com/containers/image/v5/types"
|
||||
guuid "github.com/gofrs/uuid"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
|
||||
"github.com/sigstore/cosign/pkg/oci/static"
|
||||
@ -374,7 +375,7 @@ func pushSyncedLocalImage(localRepo, tag, localCachePath string,
|
||||
}
|
||||
|
||||
for _, manifest := range indexManifest.Manifests {
|
||||
manifestBuf, err := cacheImageStore.GetBlobContent(localRepo, manifest.Digest.String())
|
||||
manifestBuf, err := cacheImageStore.GetBlobContent(localRepo, manifest.Digest)
|
||||
if err != nil {
|
||||
log.Error().Str("errorType", TypeOf(err)).
|
||||
Err(err).Str("dir", path.Join(cacheImageStore.RootDir(), localRepo)).Str("digest", manifest.Digest.String()).
|
||||
@ -423,14 +424,14 @@ func copyManifest(localRepo string, manifestContent []byte, reference string,
|
||||
}
|
||||
|
||||
for _, blob := range manifest.Layers {
|
||||
err = copyBlob(localRepo, blob.Digest.String(), blob.MediaType,
|
||||
err = copyBlob(localRepo, blob.Digest, blob.MediaType,
|
||||
cacheImageStore, imageStore, log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = copyBlob(localRepo, manifest.Config.Digest.String(), manifest.Config.MediaType,
|
||||
err = copyBlob(localRepo, manifest.Config.Digest, manifest.Config.MediaType,
|
||||
cacheImageStore, imageStore, log)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -449,7 +450,7 @@ func copyManifest(localRepo string, manifestContent []byte, reference string,
}

// Copy a blob from one image store to another image store.
func copyBlob(localRepo, blobDigest, blobMediaType string,
func copyBlob(localRepo string, blobDigest godigest.Digest, blobMediaType string,
souceImageStore, destinationImageStore storage.ImageStore, log log.Logger,
) error {
if found, _, _ := destinationImageStore.CheckBlob(localRepo, blobDigest); found {
@ -461,7 +462,7 @@ func copyBlob(localRepo, blobDigest, blobMediaType string,
if err != nil {
log.Error().Str("errorType", TypeOf(err)).Err(err).
Str("dir", path.Join(souceImageStore.RootDir(), localRepo)).
Str("blob digest", blobDigest).Str("media type", blobMediaType).
Str("blob digest", blobDigest.String()).Str("media type", blobMediaType).
Msg("couldn't read blob")

return err
@ -471,7 +472,7 @@ func copyBlob(localRepo, blobDigest, blobMediaType string,
_, _, err = destinationImageStore.FullBlobUpload(localRepo, blobReadCloser, blobDigest)
if err != nil {
log.Error().Str("errorType", TypeOf(err)).Err(err).
Str("blob digest", blobDigest).Str("media type", blobMediaType).
Str("blob digest", blobDigest.String()).Str("media type", blobMediaType).
Msg("couldn't upload blob")
}
|
||||
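copyBlob's flow is: skip when the destination already holds the blob, stream it out of the source store, then re-upload it under the same typed digest. A simplified sketch against a cut-down store interface (hypothetical, not zot's full storage.ImageStore):

package sync

import (
	"io"

	godigest "github.com/opencontainers/go-digest"
)

// blobStore is a cut-down stand-in for storage.ImageStore.
type blobStore interface {
	CheckBlob(repo string, digest godigest.Digest) (bool, int64, error)
	GetBlob(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error)
	FullBlobUpload(repo string, body io.Reader, digest godigest.Digest) (string, int64, error)
}

// copyBlob copies one blob between stores, keyed by its typed digest.
func copyBlob(repo string, digest godigest.Digest, mediaType string, src, dst blobStore) error {
	if found, _, _ := dst.CheckBlob(repo, digest); found {
		return nil // already present downstream
	}

	reader, _, err := src.GetBlob(repo, digest, mediaType)
	if err != nil {
		return err
	}
	defer reader.Close()

	_, _, err = dst.FullBlobUpload(repo, reader, digest)

	return err
}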
@ -554,7 +555,8 @@ func getLocalCachePath(imageStore storage.ImageStore, repo string) (string, erro
}

// canSkipImage returns whether or not we already synced this image.
func canSkipImage(repo, tag, digest string, imageStore storage.ImageStore, log log.Logger) (bool, error) {
func canSkipImage(repo, tag string, digest godigest.Digest, imageStore storage.ImageStore, log log.Logger,
) (bool, error) {
// check image already synced
_, localImageManifestDigest, _, err := imageStore.GetImageManifest(repo, tag)
if err != nil {
|
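canSkipImage now receives the upstream digest as a godigest.Digest and compares it directly with the digest returned by GetImageManifest, so the call site no longer converts to a string. A rough sketch of that check, assuming a trimmed-down store interface:

package sync

import (
	godigest "github.com/opencontainers/go-digest"
)

// manifestStore is a minimal stand-in for the parts of storage.ImageStore used here.
type manifestStore interface {
	GetImageManifest(repo, reference string) ([]byte, godigest.Digest, string, error)
}

// canSkipImage reports whether the local copy already matches the upstream digest.
func canSkipImage(repo, tag string, upstreamDigest godigest.Digest, store manifestStore) (bool, error) {
	_, localDigest, _, err := store.GetImageManifest(repo, tag)
	if err != nil {
		// a missing local manifest simply means we cannot skip; the real code
		// distinguishes "not found" from other errors
		return false, nil
	}

	return localDigest == upstreamDigest, nil
}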
@ -6,6 +6,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
"go.etcd.io/bbolt"
|
||||
|
||||
"zotregistry.io/zot/errors"
|
||||
@ -69,9 +70,9 @@ func NewCache(rootDir string, name string, useRelPaths bool, log zlog.Logger) *C
|
||||
return &Cache{rootDir: rootDir, db: cacheDB, useRelPaths: useRelPaths, log: log}
|
||||
}
|
||||
|
||||
func (c *Cache) PutBlob(digest, path string) error {
|
||||
func (c *Cache) PutBlob(digest godigest.Digest, path string) error {
|
||||
if path == "" {
|
||||
c.log.Error().Err(errors.ErrEmptyValue).Str("digest", digest).Msg("empty path provided")
|
||||
c.log.Error().Err(errors.ErrEmptyValue).Str("digest", digest.String()).Msg("empty path provided")
|
||||
|
||||
return errors.ErrEmptyValue
|
||||
}
|
||||
@ -95,10 +96,10 @@ func (c *Cache) PutBlob(digest, path string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket, err := root.CreateBucketIfNotExists([]byte(digest))
|
||||
bucket, err := root.CreateBucketIfNotExists([]byte(digest.String()))
|
||||
if err != nil {
|
||||
// this is a serious failure
|
||||
c.log.Error().Err(err).Str("bucket", digest).Msg("unable to create a bucket")
|
||||
c.log.Error().Err(err).Str("bucket", digest.String()).Msg("unable to create a bucket")
|
||||
|
||||
return err
|
||||
}
|
||||
@ -145,7 +146,7 @@ func (c *Cache) PutBlob(digest, path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) GetBlob(digest string) (string, error) {
|
||||
func (c *Cache) GetBlob(digest godigest.Digest) (string, error) {
|
||||
var blobPath strings.Builder
|
||||
|
||||
if err := c.db.View(func(tx *bbolt.Tx) error {
|
||||
@ -158,7 +159,7 @@ func (c *Cache) GetBlob(digest string) (string, error) {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket := root.Bucket([]byte(digest))
|
||||
bucket := root.Bucket([]byte(digest.String()))
|
||||
if bucket != nil {
|
||||
origin := bucket.Bucket([]byte(OriginalBucket))
|
||||
blobPath.WriteString(string(c.getOne(origin)))
|
||||
@ -174,7 +175,7 @@ func (c *Cache) GetBlob(digest string) (string, error) {
|
||||
return blobPath.String(), nil
|
||||
}
|
||||
|
||||
func (c *Cache) HasBlob(digest, blob string) bool {
|
||||
func (c *Cache) HasBlob(digest godigest.Digest, blob string) bool {
|
||||
if err := c.db.View(func(tx *bbolt.Tx) error {
|
||||
root := tx.Bucket([]byte(BlobsCache))
|
||||
if root == nil {
|
||||
@ -185,7 +186,7 @@ func (c *Cache) HasBlob(digest, blob string) bool {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket := root.Bucket([]byte(digest))
|
||||
bucket := root.Bucket([]byte(digest.String()))
|
||||
if bucket == nil {
|
||||
return errors.ErrCacheMiss
|
||||
}
|
||||
@ -218,7 +219,7 @@ func (c *Cache) getOne(bucket *bbolt.Bucket) []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) DeleteBlob(digest, path string) error {
|
||||
func (c *Cache) DeleteBlob(digest godigest.Digest, path string) error {
|
||||
// use only relative (to rootDir) paths on blobs
|
||||
var err error
|
||||
if c.useRelPaths {
|
||||
@ -238,7 +239,7 @@ func (c *Cache) DeleteBlob(digest, path string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket := root.Bucket([]byte(digest))
|
||||
bucket := root.Bucket([]byte(digest.String()))
|
||||
if bucket == nil {
|
||||
return errors.ErrCacheMiss
|
||||
}
|
||||
@ -249,7 +250,7 @@ func (c *Cache) DeleteBlob(digest, path string) error {
|
||||
}
|
||||
|
||||
if err := deduped.Delete([]byte(path)); err != nil {
|
||||
c.log.Error().Err(err).Str("digest", digest).Str("bucket", DuplicatesBucket).
|
||||
c.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", DuplicatesBucket).
|
||||
Str("path", path).Msg("unable to delete")
|
||||
|
||||
return err
|
||||
@ -260,7 +261,7 @@ func (c *Cache) DeleteBlob(digest, path string) error {
|
||||
originBlob := c.getOne(origin)
|
||||
if originBlob != nil {
|
||||
if err := origin.Delete([]byte(path)); err != nil {
|
||||
c.log.Error().Err(err).Str("digest", digest).Str("bucket", OriginalBucket).
|
||||
c.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", OriginalBucket).
|
||||
Str("path", path).Msg("unable to delete")
|
||||
|
||||
return err
|
||||
@ -270,7 +271,8 @@ func (c *Cache) DeleteBlob(digest, path string) error {
|
||||
dedupedBlob := c.getOne(deduped)
|
||||
if dedupedBlob != nil {
|
||||
if err := origin.Put(dedupedBlob, nil); err != nil {
|
||||
c.log.Error().Err(err).Str("digest", digest).Str("bucket", OriginalBucket).Str("path", path).Msg("unable to put")
|
||||
c.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", OriginalBucket).Str("path", path).
|
||||
Msg("unable to put")
|
||||
|
||||
return err
|
||||
}
|
||||
@ -281,9 +283,10 @@ func (c *Cache) DeleteBlob(digest, path string) error {
// if no key in origin bucket then digest bucket is empty, remove it
k := c.getOne(origin)
if k == nil {
c.log.Debug().Str("digest", digest).Str("path", path).Msg("deleting empty bucket")
if err := root.DeleteBucket([]byte(digest)); err != nil {
c.log.Error().Err(err).Str("digest", digest).Str("bucket", digest).Str("path", path).Msg("unable to delete")
c.log.Debug().Str("digest", digest.String()).Str("path", path).Msg("deleting empty bucket")
if err := root.DeleteBucket([]byte(digest.String())); err != nil {
c.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", digest.String()).Str("path", path).
Msg("unable to delete")

return err
}
|
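The cache keys its bbolt buckets by digest.String(), so the canonical sha256:... form names the bucket and the blob paths are stored underneath it. A much-reduced sketch of that layout (one flat bucket; the real cache also keeps origin and deduped sub-buckets):

package main

import (
	"fmt"
	"log"
	"path/filepath"

	godigest "github.com/opencontainers/go-digest"
	"go.etcd.io/bbolt"
)

const blobsBucket = "blobs"

// putBlobPath records a blob location under a sub-bucket named after the digest.
// Simplified layout, not zot's actual cache schema.
func putBlobPath(db *bbolt.DB, digest godigest.Digest, blobPath string) error {
	return db.Update(func(tx *bbolt.Tx) error {
		root, err := tx.CreateBucketIfNotExists([]byte(blobsBucket))
		if err != nil {
			return err
		}

		// one sub-bucket per digest, named after its canonical string form
		bucket, err := root.CreateBucketIfNotExists([]byte(digest.String()))
		if err != nil {
			return err
		}

		return bucket.Put([]byte(blobPath), nil)
	})
}

func main() {
	db, err := bbolt.Open(filepath.Join("/tmp", "cache.db"), 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	dgst := godigest.FromString("blob bytes")
	if err := putBlobPath(db, dgst, "repo/blobs/sha256/deadbeef"); err != nil {
		log.Fatal(err)
	}

	fmt.Println("cached", dgst.String())
}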
@ -55,7 +55,7 @@ func GetManifestDescByReference(index ispec.Index, reference string) (ispec.Desc
|
||||
|
||||
func ValidateManifest(imgStore ImageStore, repo, reference, mediaType string, body []byte,
|
||||
log zerolog.Logger,
|
||||
) (string, error) {
|
||||
) (godigest.Digest, error) {
|
||||
// validate the manifest
|
||||
if !IsSupportedMediaType(mediaType) {
|
||||
log.Debug().Interface("actual", mediaType).
|
||||
@ -100,7 +100,7 @@ func ValidateManifest(imgStore ImageStore, repo, reference, mediaType string, bo
|
||||
|
||||
func validateOCIManifest(imgStore ImageStore, repo, reference string, manifest *ispec.Manifest, //nolint:unparam
|
||||
log zerolog.Logger,
|
||||
) (string, error) {
|
||||
) (godigest.Digest, error) {
|
||||
if manifest.SchemaVersion != SchemaVersion {
|
||||
log.Error().Int("SchemaVersion", manifest.SchemaVersion).Msg("invalid manifest")
|
||||
|
||||
@ -110,9 +110,9 @@ func validateOCIManifest(imgStore ImageStore, repo, reference string, manifest *
|
||||
// validate image config
|
||||
config := manifest.Config
|
||||
|
||||
blobFile, _, err := imgStore.GetBlob(repo, config.Digest.String(), "")
|
||||
blobFile, _, err := imgStore.GetBlob(repo, config.Digest, "")
|
||||
if err != nil {
|
||||
return config.Digest.String(), zerr.ErrBlobNotFound
|
||||
return config.Digest, zerr.ErrBlobNotFound
|
||||
}
|
||||
|
||||
defer blobFile.Close()
|
||||
@ -126,9 +126,9 @@ func validateOCIManifest(imgStore ImageStore, repo, reference string, manifest *
|
||||
|
||||
// validate the layers
|
||||
for _, l := range manifest.Layers {
|
||||
blobFile, _, err := imgStore.GetBlob(repo, l.Digest.String(), "")
|
||||
blobFile, _, err := imgStore.GetBlob(repo, l.Digest, "")
|
||||
if err != nil {
|
||||
return l.Digest.String(), zerr.ErrBlobNotFound
|
||||
return l.Digest, zerr.ErrBlobNotFound
|
||||
}
|
||||
|
||||
defer blobFile.Close()
|
||||
@ -137,10 +137,10 @@ func validateOCIManifest(imgStore ImageStore, repo, reference string, manifest *
return "", nil
}

func GetAndValidateRequestDigest(body []byte, digest string, log zerolog.Logger) (godigest.Digest, error) {
func GetAndValidateRequestDigest(body []byte, digestStr string, log zerolog.Logger) (godigest.Digest, error) {
bodyDigest := godigest.FromBytes(body)

d, err := godigest.Parse(digest)
d, err := godigest.Parse(digestStr)
if err == nil {
if d.String() != bodyDigest.String() {
log.Error().Str("actual", bodyDigest.String()).Str("expected", d.String()).
||||
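GetAndValidateRequestDigest parses the digest supplied by the client and compares it against a digest computed from the request body, so only a well-formed, matching value is accepted. A compact sketch of the same check (the error values are placeholders, not zot's zerr codes):

package main

import (
	"errors"
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

// placeholder errors, not zot's zerr values
var (
	errBadDigest      = errors.New("malformed digest")
	errDigestMismatch = errors.New("digest does not match body")
)

// validateRequestDigest returns the parsed digest if it is well formed and
// matches the uploaded body.
func validateRequestDigest(body []byte, digestStr string) (godigest.Digest, error) {
	dgst, err := godigest.Parse(digestStr)
	if err != nil {
		return "", errBadDigest
	}

	if bodyDigest := godigest.FromBytes(body); dgst != bodyDigest {
		return dgst, errDigestMismatch
	}

	return dgst, nil
}

func main() {
	body := []byte(`{"schemaVersion":2}`)
	good := godigest.FromBytes(body).String()

	fmt.Println(validateRequestDigest(body, good))
	fmt.Println(validateRequestDigest(body, "sha256:not-a-real-digest"))
}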
@ -324,7 +324,7 @@ func PruneImageManifestsFromIndex(imgStore ImageStore, repo string, digest godig
|
||||
|
||||
indexPath := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
|
||||
|
||||
buf, err := imgStore.GetBlobContent(repo, digest.String())
|
||||
buf, err := imgStore.GetBlobContent(repo, digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -343,7 +343,7 @@ func PruneImageManifestsFromIndex(imgStore ImageStore, repo string, digest godig
|
||||
}
|
||||
|
||||
for _, otherIndex := range otherImgIndexes {
|
||||
buf, err := imgStore.GetBlobContent(repo, otherIndex.Digest.String())
|
||||
buf, err := imgStore.GetBlobContent(repo, otherIndex.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -31,12 +31,12 @@ func TestValidateManifest(t *testing.T) {
|
||||
digest := godigest.FromBytes(content)
|
||||
So(digest, ShouldNotBeNil)
|
||||
|
||||
_, blen, err := imgStore.FullBlobUpload("test", bytes.NewReader(content), digest.String())
|
||||
_, blen, err := imgStore.FullBlobUpload("test", bytes.NewReader(content), digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blen, ShouldEqual, len(content))
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
|
||||
|
@ -390,7 +390,7 @@ func (is *ImageStoreLocal) GetImageTags(repo string) ([]string, error) {
|
||||
}
|
||||
|
||||
// GetImageManifest returns the image manifest of an image in the specific repository.
|
||||
func (is *ImageStoreLocal) GetImageManifest(repo, reference string) ([]byte, string, string, error) {
|
||||
func (is *ImageStoreLocal) GetImageManifest(repo, reference string) ([]byte, godigest.Digest, string, error) {
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
if !is.DirExists(dir) {
|
||||
return nil, "", "", zerr.ErrRepoNotFound
|
||||
@ -406,7 +406,7 @@ func (is *ImageStoreLocal) GetImageManifest(repo, reference string) ([]byte, str
|
||||
return nil, "", "", zerr.ErrManifestNotFound
|
||||
}
|
||||
|
||||
buf, err := is.GetBlobContent(repo, manifestDesc.Digest.String())
|
||||
buf, err := is.GetBlobContent(repo, manifestDesc.Digest)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrBlobNotFound) {
|
||||
return nil, "", "", zerr.ErrManifestNotFound
|
||||
@ -424,22 +424,22 @@ func (is *ImageStoreLocal) GetImageManifest(repo, reference string) ([]byte, str
|
||||
|
||||
monitoring.IncDownloadCounter(is.metrics, repo)
|
||||
|
||||
return buf, manifestDesc.Digest.String(), manifestDesc.MediaType, nil
|
||||
return buf, manifestDesc.Digest, manifestDesc.MediaType, nil
|
||||
}
|
||||
|
||||
// PutImageManifest adds an image manifest to the repository.
|
||||
func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, //nolint: gocyclo
|
||||
body []byte,
|
||||
) (string, error) {
|
||||
) (godigest.Digest, error) {
|
||||
if err := is.InitRepo(repo); err != nil {
|
||||
is.log.Debug().Err(err).Msg("init repo")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
dig, err := storage.ValidateManifest(is, repo, reference, mediaType, body, is.log)
|
||||
digest, err := storage.ValidateManifest(is, repo, reference, mediaType, body, is.log)
|
||||
if err != nil {
|
||||
return dig, err
|
||||
return digest, err
|
||||
}
|
||||
|
||||
refIsDigest := true
|
||||
@ -447,7 +447,7 @@ func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, /
|
||||
mDigest, err := storage.GetAndValidateRequestDigest(body, reference, is.log)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrBadManifest) {
|
||||
return mDigest.String(), err
|
||||
return mDigest, err
|
||||
}
|
||||
|
||||
refIsDigest = false
|
||||
@ -473,7 +473,7 @@ func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, /
|
||||
}
|
||||
|
||||
if !updateIndex {
|
||||
return desc.Digest.String(), nil
|
||||
return desc.Digest, nil
|
||||
}
|
||||
|
||||
var lockLatency time.Time
|
||||
@ -543,7 +543,7 @@ func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, /
|
||||
monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
|
||||
monitoring.IncUploadCounter(is.metrics, repo)
|
||||
|
||||
return desc.Digest.String(), nil
|
||||
return desc.Digest, nil
|
||||
}
|
||||
|
||||
// DeleteImageManifest deletes the image manifest from the repository.
|
||||
@ -778,17 +778,14 @@ func (is *ImageStoreLocal) BlobUploadInfo(repo, uuid string) (int64, error) {
|
||||
}
|
||||
|
||||
// FinishBlobUpload finalizes the blob upload and moves the blob into the repository.
|
||||
func (is *ImageStoreLocal) FinishBlobUpload(repo, uuid string, body io.Reader, digest string) error {
|
||||
dstDigest, err := godigest.Parse(digest)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
|
||||
|
||||
return zerr.ErrBadBlobDigest
|
||||
func (is *ImageStoreLocal) FinishBlobUpload(repo, uuid string, body io.Reader, dstDigest godigest.Digest) error {
|
||||
if err := dstDigest.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
src := is.BlobUploadPath(repo, uuid)
|
||||
|
||||
_, err = os.Stat(src)
|
||||
_, err := os.Stat(src)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("blob", src).Msg("failed to stat blob")
|
||||
|
||||
@ -808,7 +805,8 @@ func (is *ImageStoreLocal) FinishBlobUpload(repo, uuid string, body io.Reader, d
|
||||
|
||||
_, err = io.Copy(digester, blobFile)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("repo", repo).Str("blob", src).Str("digest", digest).Msg("unable to compute hash")
|
||||
is.log.Error().Err(err).Str("repo", repo).Str("blob", src).Str("digest", dstDigest.String()).
|
||||
Msg("unable to compute hash")
|
||||
|
||||
return err
|
||||
}
|
||||
@ -859,16 +857,14 @@ func (is *ImageStoreLocal) FinishBlobUpload(repo, uuid string, body io.Reader, d
}

// FullBlobUpload handles a full blob upload, and no partial session is created.
func (is *ImageStoreLocal) FullBlobUpload(repo string, body io.Reader, digest string) (string, int64, error) {
if err := is.InitRepo(repo); err != nil {
func (is *ImageStoreLocal) FullBlobUpload(repo string, body io.Reader, dstDigest godigest.Digest,
) (string, int64, error) {
if err := dstDigest.Validate(); err != nil {
return "", -1, err
}

dstDigest, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")

return "", -1, zerr.ErrBadBlobDigest
if err := is.InitRepo(repo); err != nil {
return "", -1, err
}

u, err := guuid.NewV4()
||||
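With the parameter typed as godigest.Digest, the up-front godigest.Parse call disappears and the store only needs dstDigest.Validate() before doing any I/O; malformed digests are rejected immediately. A minimal sketch of that shape (a hypothetical helper, not the real FullBlobUpload):

package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

// storeBlob shows the shape of the refactor: the caller hands over a typed
// digest and the function only validates it, rather than parsing a string.
func storeBlob(repo string, dstDigest godigest.Digest) error {
	if err := dstDigest.Validate(); err != nil {
		return err // e.g. unsupported algorithm or bad encoded length
	}

	// ... write the blob and compare the computed digest against dstDigest ...
	fmt.Println("storing", repo, dstDigest.Encoded())

	return nil
}

func main() {
	fmt.Println(storeBlob("zot-test", godigest.FromString("blob")))  // nil
	fmt.Println(storeBlob("zot-test", godigest.Digest("not-valid"))) // validation error
}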
@ -944,7 +940,7 @@ func (is *ImageStoreLocal) DedupeBlob(src string, dstDigest godigest.Digest, dst
retry:
is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: enter")

dstRecord, err := is.cache.GetBlob(dstDigest.String())
dstRecord, err := is.cache.GetBlob(dstDigest)

if err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to lookup blob record")
@ -954,7 +950,7 @@ retry:

if dstRecord == "" {
// cache record doesn't exist, so first disk and cache entry for this digest
if err := is.cache.PutBlob(dstDigest.String(), dst); err != nil {
if err := is.cache.PutBlob(dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record")

return err
@ -977,7 +973,7 @@ retry:
if err != nil {
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat")
// the actual blob on disk may have been removed by GC, so sync the cache
if err := is.cache.DeleteBlob(dstDigest.String(), dstRecord); err != nil {
if err := is.cache.DeleteBlob(dstDigest, dstRecord); err != nil {
//nolint:lll // gofumpt conflicts with lll
is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: unable to delete blob record")
|
||||
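DedupeBlob asks the cache for an existing path holding the same digest and, when one is found and still on disk, hard-links the new location to it instead of keeping a second copy; a stale cache record (for example after GC) is dropped and the upload proceeds as a first copy. A rough sketch of that decision, assuming a simple cache interface:

package storage

import (
	"errors"
	"os"

	godigest "github.com/opencontainers/go-digest"
)

var errCacheMiss = errors.New("cache: miss")

// blobCache is a stand-in for the bbolt-backed cache shown above.
type blobCache interface {
	GetBlob(digest godigest.Digest) (string, error)
	PutBlob(digest godigest.Digest, path string) error
	DeleteBlob(digest godigest.Digest, path string) error
}

// dedupeBlob links dst to an existing blob with the same digest when possible.
func dedupeBlob(cache blobCache, dstDigest godigest.Digest, dst string) error {
	existing, err := cache.GetBlob(dstDigest)
	if err != nil && !errors.Is(err, errCacheMiss) {
		return err
	}

	if existing == "" {
		// first copy of this digest: just record where it lives
		return cache.PutBlob(dstDigest, dst)
	}

	if _, err := os.Stat(existing); err != nil {
		// the cached file was garbage-collected; forget it and start over
		if err := cache.DeleteBlob(dstDigest, existing); err != nil {
			return err
		}

		return cache.PutBlob(dstDigest, dst)
	}

	// point dst at the existing copy instead of storing the bytes twice
	_ = os.Remove(dst)

	return os.Link(existing, dst)
}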
@ -1041,17 +1037,14 @@ func (is *ImageStoreLocal) BlobPath(repo string, digest godigest.Digest) string
|
||||
}
|
||||
|
||||
// CheckBlob verifies a blob and returns true if the blob is correct.
|
||||
func (is *ImageStoreLocal) CheckBlob(repo, digest string) (bool, int64, error) {
|
||||
func (is *ImageStoreLocal) CheckBlob(repo string, digest godigest.Digest) (bool, int64, error) {
|
||||
var lockLatency time.Time
|
||||
|
||||
parsedDigest, err := godigest.Parse(digest)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
|
||||
|
||||
return false, -1, zerr.ErrBadBlobDigest
|
||||
if err := digest.Validate(); err != nil {
|
||||
return false, -1, err
|
||||
}
|
||||
|
||||
blobPath := is.BlobPath(repo, parsedDigest)
|
||||
blobPath := is.BlobPath(repo, digest)
|
||||
|
||||
if is.dedupe && is.cache != nil {
|
||||
is.Lock(&lockLatency)
|
||||
@ -1073,7 +1066,7 @@ func (is *ImageStoreLocal) CheckBlob(repo, digest string) (bool, int64, error) {
|
||||
// Check blobs in cache
|
||||
dstRecord, err := is.checkCacheBlob(digest)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Msg("cache: not found")
|
||||
is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found")
|
||||
|
||||
return false, -1, zerr.ErrBlobNotFound
|
||||
}
|
||||
@ -1093,7 +1086,11 @@ func (is *ImageStoreLocal) CheckBlob(repo, digest string) (bool, int64, error) {
|
||||
return true, blobSize, nil
|
||||
}
|
||||
|
||||
func (is *ImageStoreLocal) checkCacheBlob(digest string) (string, error) {
|
||||
func (is *ImageStoreLocal) checkCacheBlob(digest godigest.Digest) (string, error) {
|
||||
if err := digest.Validate(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !is.dedupe || is.cache == nil {
|
||||
return "", zerr.ErrBlobNotFound
|
||||
}
|
||||
@ -1110,7 +1107,8 @@ func (is *ImageStoreLocal) checkCacheBlob(digest string) (string, error) {
|
||||
|
||||
// the actual blob on disk may have been removed by GC, so sync the cache
|
||||
if err := is.cache.DeleteBlob(digest, dstRecord); err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Str("blobPath", dstRecord).Msg("unable to remove blob path from cache")
|
||||
is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", dstRecord).
|
||||
Msg("unable to remove blob path from cache")
|
||||
|
||||
return "", err
|
||||
}
|
||||
@ -1118,7 +1116,7 @@ func (is *ImageStoreLocal) checkCacheBlob(digest string) (string, error) {
|
||||
return "", zerr.ErrBlobNotFound
|
||||
}
|
||||
|
||||
is.log.Debug().Str("digest", digest).Str("dstRecord", dstRecord).Msg("cache: found dedupe record")
|
||||
is.log.Debug().Str("digest", digest.String()).Str("dstRecord", dstRecord).Msg("cache: found dedupe record")
|
||||
|
||||
return dstRecord, nil
|
||||
}
|
||||
@ -1186,18 +1184,15 @@ func (bs *blobStream) Close() error {
|
||||
|
||||
// GetBlobPartial returns a partial stream to read the blob.
|
||||
// blob selector instead of directly downloading the blob.
|
||||
func (is *ImageStoreLocal) GetBlobPartial(repo, digest, mediaType string, from, to int64,
|
||||
func (is *ImageStoreLocal) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,
|
||||
) (io.ReadCloser, int64, int64, error) {
|
||||
var lockLatency time.Time
|
||||
|
||||
parsedDigest, err := godigest.Parse(digest)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
|
||||
|
||||
return nil, -1, -1, zerr.ErrBadBlobDigest
|
||||
if err := digest.Validate(); err != nil {
|
||||
return nil, -1, -1, err
|
||||
}
|
||||
|
||||
blobPath := is.BlobPath(repo, parsedDigest)
|
||||
blobPath := is.BlobPath(repo, digest)
|
||||
|
||||
is.RLock(&lockLatency)
|
||||
defer is.RUnlock(&lockLatency)
|
||||
@ -1226,17 +1221,15 @@ func (is *ImageStoreLocal) GetBlobPartial(repo, digest, mediaType string, from,
|
||||
|
||||
// GetBlob returns a stream to read the blob.
|
||||
// blob selector instead of directly downloading the blob.
|
||||
func (is *ImageStoreLocal) GetBlob(repo, digest, mediaType string) (io.ReadCloser, int64, error) {
|
||||
func (is *ImageStoreLocal) GetBlob(repo string, digest godigest.Digest, mediaType string,
|
||||
) (io.ReadCloser, int64, error) {
|
||||
var lockLatency time.Time
|
||||
|
||||
parsedDigest, err := godigest.Parse(digest)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
|
||||
|
||||
return nil, -1, zerr.ErrBadBlobDigest
|
||||
if err := digest.Validate(); err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
|
||||
blobPath := is.BlobPath(repo, parsedDigest)
|
||||
blobPath := is.BlobPath(repo, digest)
|
||||
|
||||
is.RLock(&lockLatency)
|
||||
defer is.RUnlock(&lockLatency)
|
||||
@ -1259,7 +1252,11 @@ func (is *ImageStoreLocal) GetBlob(repo, digest, mediaType string) (io.ReadClose
|
||||
return blobReadCloser, binfo.Size(), nil
|
||||
}
|
||||
|
||||
func (is *ImageStoreLocal) GetBlobContent(repo, digest string) ([]byte, error) {
|
||||
func (is *ImageStoreLocal) GetBlobContent(repo string, digest godigest.Digest) ([]byte, error) {
|
||||
if err := digest.Validate(); err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
blob, _, err := is.GetBlob(repo, digest, ispec.MediaTypeImageManifest)
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
@ -1270,7 +1267,7 @@ func (is *ImageStoreLocal) GetBlobContent(repo, digest string) ([]byte, error) {
|
||||
|
||||
_, err = buf.ReadFrom(blob)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Msg("failed to read blob")
|
||||
is.log.Error().Err(err).Str("digest", digest.String()).Msg("failed to read blob")
|
||||
|
||||
return []byte{}, err
|
||||
}
|
||||
@ -1303,22 +1300,19 @@ func (is *ImageStoreLocal) GetIndexContent(repo string) ([]byte, error) {
|
||||
}
|
||||
|
||||
// DeleteBlob removes the blob from the repository.
|
||||
func (is *ImageStoreLocal) DeleteBlob(repo, digest string) error {
|
||||
func (is *ImageStoreLocal) DeleteBlob(repo string, digest godigest.Digest) error {
|
||||
var lockLatency time.Time
|
||||
|
||||
dgst, err := godigest.Parse(digest)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
|
||||
|
||||
return zerr.ErrBlobNotFound
|
||||
if err := digest.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blobPath := is.BlobPath(repo, dgst)
|
||||
blobPath := is.BlobPath(repo, digest)
|
||||
|
||||
is.Lock(&lockLatency)
|
||||
defer is.Unlock(&lockLatency)
|
||||
|
||||
_, err = os.Stat(blobPath)
|
||||
_, err := os.Stat(blobPath)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
|
||||
|
||||
@ -1327,7 +1321,8 @@ func (is *ImageStoreLocal) DeleteBlob(repo, digest string) error {
|
||||
|
||||
if is.cache != nil {
|
||||
if err := is.cache.DeleteBlob(digest, blobPath); err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Str("blobPath", blobPath).Msg("unable to remove blob path from cache")
|
||||
is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", blobPath).
|
||||
Msg("unable to remove blob path from cache")
|
||||
|
||||
return err
|
||||
}
|
||||
@ -1342,21 +1337,19 @@ func (is *ImageStoreLocal) DeleteBlob(repo, digest string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (is *ImageStoreLocal) GetReferrers(repo, digest, artifactType string) ([]artifactspec.Descriptor, error) {
|
||||
func (is *ImageStoreLocal) GetReferrers(repo string, gdigest godigest.Digest, artifactType string,
|
||||
) ([]artifactspec.Descriptor, error) {
|
||||
var lockLatency time.Time
|
||||
|
||||
if err := gdigest.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dir := path.Join(is.rootDir, repo)
|
||||
if !is.DirExists(dir) {
|
||||
return nil, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
||||
gdigest, err := godigest.Parse(digest)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
|
||||
|
||||
return nil, zerr.ErrBadBlobDigest
|
||||
}
|
||||
|
||||
index, err := storage.GetIndex(is, repo, is.log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -45,7 +45,7 @@ func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
|
||||
blobDigest1 := strings.Split(digest.String(), ":")[1]
|
||||
So(blobDigest1, ShouldNotBeEmpty)
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe1", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe1", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
@ -81,7 +81,7 @@ func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest)
|
||||
So(err, ShouldNotBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
@ -91,7 +91,7 @@ func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
})
|
||||
|
@ -60,17 +60,17 @@ func TestStorageFSAPIs(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationsMap := make(map[string]string)
|
||||
annotationsMap[ispec.AnnotationRefName] = tag
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err := imgStore.CheckBlob(repoName, cdigest.String())
|
||||
hasBlob, _, err := imgStore.CheckBlob(repoName, cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -148,7 +148,7 @@ func TestStorageFSAPIs(t *testing.T) {
|
||||
_, err = imgStore.GetReferrers(repoName, "invalid", "invalid")
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, err = imgStore.GetReferrers(repoName, digest.String(), "invalid")
|
||||
_, err = imgStore.GetReferrers(repoName, digest, "invalid")
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
// invalid DeleteImageManifest
|
||||
@ -187,7 +187,7 @@ func TestGetReferrers(t *testing.T) {
|
||||
"zot-test", "blobs", digest.Algorithm().String(), digest.Encoded()),
|
||||
buf.Bytes(), 0o644)
|
||||
So(err, ShouldBeNil)
|
||||
_, n, err := imgStore.FullBlobUpload("zot-test", buf, digest.String())
|
||||
_, n, err := imgStore.FullBlobUpload("zot-test", buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(n, ShouldEqual, buflen)
|
||||
|
||||
@ -207,7 +207,7 @@ func TestGetReferrers(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
descriptors, err := imgStore.GetReferrers("zot-test", digest.String(), "signature-example")
|
||||
descriptors, err := imgStore.GetReferrers("zot-test", digest, "signature-example")
|
||||
So(err, ShouldBeNil)
|
||||
So(descriptors, ShouldNotBeEmpty)
|
||||
So(descriptors[0].ArtifactType, ShouldEqual, "signature-example")
|
||||
@ -328,11 +328,11 @@ func FuzzTestPutGetImageManifest(f *testing.F) {
|
||||
t.Errorf("error occurred while generating random blob, %v", err)
|
||||
}
|
||||
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest.String())
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(lblob), ldigest.String())
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(lblob), ldigest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -374,12 +374,12 @@ func FuzzTestPutDeleteImageManifest(f *testing.F) {
|
||||
t.Errorf("error occurred while generating random blob, %v", err)
|
||||
}
|
||||
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest.String())
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(lblob), ldigest.String())
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(lblob), ldigest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -594,7 +594,7 @@ func FuzzFinishBlobUpload(f *testing.F) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, digest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -620,7 +620,7 @@ func FuzzFullBlobUpload(f *testing.F) {
|
||||
t.Errorf("error occurred while generating random blob, %v", err)
|
||||
}
|
||||
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(lblob), ldigest.String())
|
||||
_, _, err = imgStore.FullBlobUpload(repoName, bytes.NewReader(lblob), ldigest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -646,7 +646,7 @@ func FuzzDedupeBlob(f *testing.F) {
|
||||
src := path.Join(imgStore.RootDir(), "src")
|
||||
blob := bytes.NewReader([]byte(data))
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload("repoName", blob, blobDigest.String())
|
||||
_, _, err := imgStore.FullBlobUpload("repoName", blob, blobDigest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -719,14 +719,14 @@ func FuzzCheckBlob(f *testing.F) {
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
}
|
||||
t.Error(err)
|
||||
}
|
||||
_, _, err = imgStore.CheckBlob(repoName, digest.String())
|
||||
_, _, err = imgStore.CheckBlob(repoName, digest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -745,7 +745,7 @@ func FuzzGetBlob(f *testing.F) {
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -753,7 +753,7 @@ func FuzzGetBlob(f *testing.F) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
blobReadCloser, _, err := imgStore.GetBlob(repoName, digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
blobReadCloser, _, err := imgStore.GetBlob(repoName, digest, "application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -778,7 +778,7 @@ func FuzzDeleteBlob(f *testing.F) {
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -786,7 +786,7 @@ func FuzzDeleteBlob(f *testing.F) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = imgStore.DeleteBlob(repoName, digest.String())
|
||||
err = imgStore.DeleteBlob(repoName, digest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -808,7 +808,7 @@ func FuzzGetIndexContent(f *testing.F) {
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -838,7 +838,7 @@ func FuzzGetBlobContent(f *testing.F) {
|
||||
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
|
||||
digest := godigest.FromString(data)
|
||||
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest.String())
|
||||
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -846,7 +846,7 @@ func FuzzGetBlobContent(f *testing.F) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
_, err = imgStore.GetBlobContent(repoName, digest.String())
|
||||
_, err = imgStore.GetBlobContent(repoName, digest)
|
||||
if err != nil {
|
||||
if isKnownErr(err) {
|
||||
return
|
||||
@ -879,7 +879,7 @@ func FuzzGetReferrers(f *testing.F) {
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
_, _, err = imgStore.FullBlobUpload("zot-test", buf, digest.String())
|
||||
_, _, err = imgStore.FullBlobUpload("zot-test", buf, digest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -902,7 +902,7 @@ func FuzzGetReferrers(f *testing.F) {
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
_, err = imgStore.GetReferrers("zot-test", digest.String(), data)
|
||||
_, err = imgStore.GetReferrers("zot-test", digest, data)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrManifestNotFound) || isKnownErr(err) {
|
||||
return
|
||||
@ -951,23 +951,23 @@ func TestDedupeLinks(t *testing.T) {
|
||||
blobDigest1 := strings.Split(digest.String(), ":")[1]
|
||||
So(blobDigest1, ShouldNotBeEmpty)
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe1", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe1", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
_, _, err = imgStore.CheckBlob("dedupe1", digest.String())
|
||||
_, _, err = imgStore.CheckBlob("dedupe1", digest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
blobrc, _, err := imgStore.GetBlob("dedupe1", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
blobrc, _, err := imgStore.GetBlob("dedupe1", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
So(err, ShouldBeNil)
|
||||
err = blobrc.Close()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload("dedupe1", bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err := imgStore.FullBlobUpload("dedupe1", bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err := imgStore.CheckBlob("dedupe1", cdigest.String())
|
||||
hasBlob, _, err := imgStore.CheckBlob("dedupe1", cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1011,23 +1011,23 @@ func TestDedupeLinks(t *testing.T) {
|
||||
blobDigest2 := strings.Split(digest.String(), ":")[1]
|
||||
So(blobDigest2, ShouldNotBeEmpty)
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
_, _, err = imgStore.CheckBlob("dedupe2", digest.String())
|
||||
_, _, err = imgStore.CheckBlob("dedupe2", digest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
blobrc, _, err = imgStore.GetBlob("dedupe2", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
blobrc, _, err = imgStore.GetBlob("dedupe2", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
So(err, ShouldBeNil)
|
||||
err = blobrc.Close()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
cblob, cdigest = test.GetRandomImageConfig()
|
||||
_, clen, err = imgStore.FullBlobUpload("dedupe2", bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err = imgStore.FullBlobUpload("dedupe2", bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err = imgStore.CheckBlob("dedupe2", cdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob("dedupe2", cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1085,7 +1085,7 @@ func TestDedupeLinks(t *testing.T) {
|
||||
blobDigest2 := strings.Split(digest.String(), ":")[1]
|
||||
So(blobDigest2, ShouldNotBeEmpty)
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe3", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe3", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
})
|
||||
@ -1585,17 +1585,17 @@ func TestGarbageCollect(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, bdigest.String())
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationsMap := make(map[string]string)
|
||||
annotationsMap[ispec.AnnotationRefName] = tag
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err := imgStore.CheckBlob(repoName, cdigest.String())
|
||||
hasBlob, _, err := imgStore.CheckBlob(repoName, cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1623,14 +1623,14 @@ func TestGarbageCollect(t *testing.T) {
|
||||
_, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, manifestBuf)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
err = imgStore.DeleteImageManifest(repoName, digest.String())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
})
|
||||
@ -1653,7 +1653,7 @@ func TestGarbageCollect(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, odigest.String())
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, odigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// sleep so orphan blob can be GC'ed
|
||||
@ -1673,17 +1673,17 @@ func TestGarbageCollect(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, bdigest.String())
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationsMap := make(map[string]string)
|
||||
annotationsMap[ispec.AnnotationRefName] = tag
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err := imgStore.CheckBlob(repoName, cdigest.String())
|
||||
hasBlob, _, err := imgStore.CheckBlob(repoName, cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1711,11 +1711,11 @@ func TestGarbageCollect(t *testing.T) {
|
||||
_, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, manifestBuf)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, odigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, odigest)
|
||||
So(err, ShouldNotBeNil)
|
||||
So(hasBlob, ShouldEqual, false)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1725,7 +1725,7 @@ func TestGarbageCollect(t *testing.T) {
|
||||
err = imgStore.DeleteImageManifest(repoName, digest.String())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repoName, bdigest)
|
||||
So(err, ShouldNotBeNil)
|
||||
So(hasBlob, ShouldEqual, false)
|
||||
})
|
||||
@ -1753,17 +1753,17 @@ func TestGarbageCollect(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repo1Name, upload, buf, bdigest.String())
|
||||
err = imgStore.FinishBlobUpload(repo1Name, upload, buf, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationsMap := make(map[string]string)
|
||||
annotationsMap[ispec.AnnotationRefName] = tag
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload(repo1Name, bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err := imgStore.FullBlobUpload(repo1Name, bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err := imgStore.CheckBlob(repo1Name, cdigest.String())
|
||||
hasBlob, _, err := imgStore.CheckBlob(repo1Name, cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1790,14 +1790,14 @@ func TestGarbageCollect(t *testing.T) {
|
||||
_, err = imgStore.PutImageManifest(repo1Name, tag, ispec.MediaTypeImageManifest, manifestBuf)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo1Name, tdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo1Name, tdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
// sleep so past GC timeout
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo1Name, tdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo1Name, tdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1816,17 +1816,17 @@ func TestGarbageCollect(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repo2Name, upload, buf, bdigest.String())
|
||||
err = imgStore.FinishBlobUpload(repo2Name, upload, buf, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationsMap = make(map[string]string)
|
||||
annotationsMap[ispec.AnnotationRefName] = tag
|
||||
|
||||
cblob, cdigest = test.GetRandomImageConfig()
|
||||
_, clen, err = imgStore.FullBlobUpload(repo2Name, bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err = imgStore.FullBlobUpload(repo2Name, bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo2Name, cdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo2Name, cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1853,7 +1853,7 @@ func TestGarbageCollect(t *testing.T) {
|
||||
_, err = imgStore.PutImageManifest(repo2Name, tag, ispec.MediaTypeImageManifest, manifestBuf)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo2Name, bdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo2Name, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1872,17 +1872,17 @@ func TestGarbageCollect(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repo2Name, upload, buf, bdigest.String())
|
||||
err = imgStore.FinishBlobUpload(repo2Name, upload, buf, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
annotationsMap = make(map[string]string)
|
||||
annotationsMap[ispec.AnnotationRefName] = tag
|
||||
|
||||
cblob, cdigest = test.GetRandomImageConfig()
|
||||
_, clen, err = imgStore.FullBlobUpload(repo2Name, bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err = imgStore.FullBlobUpload(repo2Name, bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo2Name, cdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo2Name, cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1912,7 +1912,7 @@ func TestGarbageCollect(t *testing.T) {
|
||||
|
||||
// original blob should exist
|
||||
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo2Name, tdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob(repo2Name, tdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -2144,22 +2144,22 @@ func TestPullRange(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, bdigest.String())
|
||||
err = imgStore.FinishBlobUpload(repoName, upload, buf, bdigest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetBlobPartial(repoName, "", "application/octet-stream", 0, 1)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetBlobPartial(repoName, bdigest.String(), "application/octet-stream", 1, 0)
|
||||
_, _, _, err = imgStore.GetBlobPartial(repoName, bdigest, "application/octet-stream", 1, 0)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, _, _, err = imgStore.GetBlobPartial(repoName, bdigest.String(), "application/octet-stream", 1, 0)
|
||||
_, _, _, err = imgStore.GetBlobPartial(repoName, bdigest, "application/octet-stream", 1, 0)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
blobPath := path.Join(imgStore.RootDir(), repoName, "blobs", bdigest.Algorithm().String(), bdigest.Encoded())
|
||||
err = os.Chmod(blobPath, 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
_, _, _, err = imgStore.GetBlobPartial(repoName, bdigest.String(), "application/octet-stream", -1, 1)
|
||||
_, _, _, err = imgStore.GetBlobPartial(repoName, bdigest, "application/octet-stream", -1, 1)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
|
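Note (editor's sketch, not part of the commit): the tests above build on-disk blob locations from a typed godigest.Digest, using Encoded() in place of the deprecated Hex(). A minimal, self-contained illustration of that path convention; repoRoot is an assumed example value.

// Editor's illustration only: repoRoot is an assumed path, not taken from the commit.
package main

import (
	"fmt"
	"path"

	godigest "github.com/opencontainers/go-digest"
)

func main() {
	content := []byte("some blob content")
	dgst := godigest.FromBytes(content) // typed digest, e.g. "sha256:..."

	// zot lays a blob out as <repo root>/blobs/<algorithm>/<encoded hash>;
	// Encoded() returns the hex part that the deprecated Hex() used to return.
	repoRoot := "/tmp/zot/zot-test"
	blobPath := path.Join(repoRoot, "blobs", dgst.Algorithm().String(), dgst.Encoded())

	fmt.Println(blobPath)
}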
@ -306,7 +306,7 @@ func (is *ObjectStorage) GetImageTags(repo string) ([]string, error) {
}

// GetImageManifest returns the image manifest of an image in the specific repository.
func (is *ObjectStorage) GetImageManifest(repo, reference string) ([]byte, string, string, error) {
func (is *ObjectStorage) GetImageManifest(repo, reference string) ([]byte, godigest.Digest, string, error) {
dir := path.Join(is.rootDir, repo)
if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
return nil, "", "", zerr.ErrRepoNotFound
@ -322,7 +322,7 @@ func (is *ObjectStorage) GetImageManifest(repo, reference string) ([]byte, strin
return nil, "", "", zerr.ErrManifestNotFound
}

buf, err := is.GetBlobContent(repo, manifestDesc.Digest.String())
buf, err := is.GetBlobContent(repo, manifestDesc.Digest)
if err != nil {
if errors.Is(err, zerr.ErrBlobNotFound) {
return nil, "", "", zerr.ErrManifestNotFound
@ -340,13 +340,13 @@ func (is *ObjectStorage) GetImageManifest(repo, reference string) ([]byte, strin

monitoring.IncDownloadCounter(is.metrics, repo)

return buf, manifestDesc.Digest.String(), manifestDesc.MediaType, nil
return buf, manifestDesc.Digest, manifestDesc.MediaType, nil
}

// PutImageManifest adds an image manifest to the repository.
func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //nolint: gocyclo
body []byte) (string, error,
) {
body []byte,
) (godigest.Digest, error) {
if err := is.InitRepo(repo); err != nil {
is.log.Debug().Err(err).Msg("init repo")

@ -363,7 +363,7 @@ func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //n
mDigest, err := storage.GetAndValidateRequestDigest(body, reference, is.log)
if err != nil {
if errors.Is(err, zerr.ErrBadManifest) {
return mDigest.String(), err
return mDigest, err
}

refIsDigest = false
@ -389,7 +389,7 @@ func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //n
}

if !updateIndex {
return desc.Digest.String(), nil
return desc.Digest, nil
}

var lockLatency time.Time
@ -446,7 +446,7 @@ func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //n
monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
monitoring.IncUploadCounter(is.metrics, repo)

return desc.Digest.String(), nil
return desc.Digest, nil
}

// DeleteImageManifest deletes the image manifest from the repository.
@ -675,12 +675,9 @@ func (is *ObjectStorage) BlobUploadInfo(repo, uuid string) (int64, error) {
}

// FinishBlobUpload finalizes the blob upload and moves blob the repository.
func (is *ObjectStorage) FinishBlobUpload(repo, uuid string, body io.Reader, digest string) error {
dstDigest, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")

return zerr.ErrBadBlobDigest
func (is *ObjectStorage) FinishBlobUpload(repo, uuid string, body io.Reader, dstDigest godigest.Digest) error {
if err := dstDigest.Validate(); err != nil {
return err
}

src := is.BlobUploadPath(repo, uuid)
@ -755,16 +752,13 @@ func (is *ObjectStorage) FinishBlobUpload(repo, uuid string, body io.Reader, dig
}

// FullBlobUpload handles a full blob upload, and no partial session is created.
func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, digest string) (string, int64, error) {
if err := is.InitRepo(repo); err != nil {
func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, dstDigest godigest.Digest) (string, int64, error) {
if err := dstDigest.Validate(); err != nil {
return "", -1, err
}

dstDigest, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")

return "", -1, zerr.ErrBadBlobDigest
if err := is.InitRepo(repo); err != nil {
return "", -1, err
}

u, err := guuid.NewV4()
@ -836,7 +830,7 @@ func (is *ObjectStorage) DedupeBlob(src string, dstDigest godigest.Digest, dst s
retry:
is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: enter")

dstRecord, err := is.cache.GetBlob(dstDigest.String())
dstRecord, err := is.cache.GetBlob(dstDigest)
if err := test.Error(err); err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to lookup blob record")

@ -845,7 +839,7 @@ retry:

if dstRecord == "" {
// cache record doesn't exist, so first disk and cache entry for this digest
if err := is.cache.PutBlob(dstDigest.String(), dst); err != nil {
if err := is.cache.PutBlob(dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record")

return err
@ -866,7 +860,7 @@ retry:
if err != nil {
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat")
// the actual blob on disk may have been removed by GC, so sync the cache
err := is.cache.DeleteBlob(dstDigest.String(), dstRecord)
err := is.cache.DeleteBlob(dstDigest, dstRecord)
if err = test.Error(err); err != nil {
//nolint:lll
is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: unable to delete blob record")
@ -894,7 +888,7 @@ retry:
return err
}

if err := is.cache.PutBlob(dstDigest.String(), dst); err != nil {
if err := is.cache.PutBlob(dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record")

return err
@ -951,17 +945,14 @@ func (is *ObjectStorage) BlobPath(repo string, digest godigest.Digest) string {
}

// CheckBlob verifies a blob and returns true if the blob is correct.
func (is *ObjectStorage) CheckBlob(repo, digest string) (bool, int64, error) {
func (is *ObjectStorage) CheckBlob(repo string, digest godigest.Digest) (bool, int64, error) {
var lockLatency time.Time

dgst, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")

return false, -1, zerr.ErrBadBlobDigest
if err := digest.Validate(); err != nil {
return false, -1, err
}

blobPath := is.BlobPath(repo, dgst)
blobPath := is.BlobPath(repo, digest)

if is.dedupe && is.cache != nil {
is.Lock(&lockLatency)
@ -982,12 +973,11 @@ func (is *ObjectStorage) CheckBlob(repo, digest string) (bool, int64, error) {
// Check blobs in cache
dstRecord, err := is.checkCacheBlob(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("cache: not found")
is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found")

return false, -1, zerr.ErrBlobNotFound
}

// If found copy to location
blobSize, err := is.copyBlob(repo, blobPath, dstRecord)
if err != nil {
return false, -1, zerr.ErrBlobNotFound
@ -1003,7 +993,11 @@ func (is *ObjectStorage) CheckBlob(repo, digest string) (bool, int64, error) {
return true, blobSize, nil
}

func (is *ObjectStorage) checkCacheBlob(digest string) (string, error) {
func (is *ObjectStorage) checkCacheBlob(digest godigest.Digest) (string, error) {
if err := digest.Validate(); err != nil {
return "", err
}

if is.cache == nil {
return "", zerr.ErrBlobNotFound
}
@ -1018,7 +1012,8 @@ func (is *ObjectStorage) checkCacheBlob(digest string) (string, error) {

// the actual blob on disk may have been removed by GC, so sync the cache
if err := is.cache.DeleteBlob(digest, dstRecord); err != nil {
is.log.Error().Err(err).Str("digest", digest).Str("blobPath", dstRecord).Msg("unable to remove blob path from cache")
is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", dstRecord).
Msg("unable to remove blob path from cache")

return "", err
}
@ -1026,7 +1021,7 @@ func (is *ObjectStorage) checkCacheBlob(digest string) (string, error) {
return "", zerr.ErrBlobNotFound
}

is.log.Debug().Str("digest", digest).Str("dstRecord", dstRecord).Msg("cache: found dedupe record")
is.log.Debug().Str("digest", digest.String()).Str("dstRecord", dstRecord).Msg("cache: found dedupe record")

return dstRecord, nil
}
@ -1073,18 +1068,15 @@ func (bs *blobStream) Close() error {

// GetBlobPartial returns a partial stream to read the blob.
// blob selector instead of directly downloading the blob.
func (is *ObjectStorage) GetBlobPartial(repo, digest, mediaType string, from, to int64,
func (is *ObjectStorage) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,
) (io.ReadCloser, int64, int64, error) {
var lockLatency time.Time

dgst, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")

return nil, -1, -1, zerr.ErrBadBlobDigest
if err := digest.Validate(); err != nil {
return nil, -1, -1, err
}

blobPath := is.BlobPath(repo, dgst)
blobPath := is.BlobPath(repo, digest)

is.RLock(&lockLatency)
defer is.RUnlock(&lockLatency)
@ -1123,7 +1115,7 @@ func (is *ObjectStorage) GetBlobPartial(repo, digest, mediaType string, from, to
// Check blobs in cache
dstRecord, err := is.checkCacheBlob(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("cache: not found")
is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found")

return nil, -1, -1, zerr.ErrBlobNotFound
}
@ -1164,17 +1156,14 @@ func (is *ObjectStorage) GetBlobPartial(repo, digest, mediaType string, from, to

// GetBlob returns a stream to read the blob.
// blob selector instead of directly downloading the blob.
func (is *ObjectStorage) GetBlob(repo, digest, mediaType string) (io.ReadCloser, int64, error) {
func (is *ObjectStorage) GetBlob(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error) {
var lockLatency time.Time

dgst, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")

return nil, -1, zerr.ErrBadBlobDigest
if err := digest.Validate(); err != nil {
return nil, -1, err
}

blobPath := is.BlobPath(repo, dgst)
blobPath := is.BlobPath(repo, digest)

is.RLock(&lockLatency)
defer is.RUnlock(&lockLatency)
@ -1198,7 +1187,7 @@ func (is *ObjectStorage) GetBlob(repo, digest, mediaType string) (io.ReadCloser,
// Check blobs in cache
dstRecord, err := is.checkCacheBlob(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("cache: not found")
is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found")

return nil, -1, zerr.ErrBlobNotFound
}
@ -1224,7 +1213,7 @@ func (is *ObjectStorage) GetBlob(repo, digest, mediaType string) (io.ReadCloser,
return blobReadCloser, binfo.Size(), nil
}

func (is *ObjectStorage) GetBlobContent(repo, digest string) ([]byte, error) {
func (is *ObjectStorage) GetBlobContent(repo string, digest godigest.Digest) ([]byte, error) {
blob, _, err := is.GetBlob(repo, digest, ispec.MediaTypeImageManifest)
if err != nil {
return []byte{}, err
@ -1243,7 +1232,8 @@ func (is *ObjectStorage) GetBlobContent(repo, digest string) ([]byte, error) {
return buf.Bytes(), nil
}

func (is *ObjectStorage) GetReferrers(repo, digest, mediaType string) ([]artifactspec.Descriptor, error) {
func (is *ObjectStorage) GetReferrers(repo string, digest godigest.Digest, mediaType string,
) ([]artifactspec.Descriptor, error) {
return nil, zerr.ErrMethodNotSupported
}

@ -1266,22 +1256,19 @@ func (is *ObjectStorage) GetIndexContent(repo string) ([]byte, error) {
}

// DeleteBlob removes the blob from the repository.
func (is *ObjectStorage) DeleteBlob(repo, digest string) error {
func (is *ObjectStorage) DeleteBlob(repo string, digest godigest.Digest) error {
var lockLatency time.Time

dgst, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")

return zerr.ErrBlobNotFound
if err := digest.Validate(); err != nil {
return err
}

blobPath := is.BlobPath(repo, dgst)
blobPath := is.BlobPath(repo, digest)

is.Lock(&lockLatency)
defer is.Unlock(&lockLatency)

_, err = is.store.Stat(context.Background(), blobPath)
_, err := is.store.Stat(context.Background(), blobPath)
if err != nil {
is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")

@ -1298,7 +1285,8 @@ func (is *ObjectStorage) DeleteBlob(repo, digest string) error {

// remove cache entry and move blob contents to the next candidate if there is any
if err := is.cache.DeleteBlob(digest, blobPath); err != nil {
is.log.Error().Err(err).Str("digest", digest).Str("blobPath", blobPath).Msg("unable to remove blob path from cache")
is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", blobPath).
Msg("unable to remove blob path from cache")

return err
}
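Note (editor's sketch, not part of the commit): the object-storage methods above now accept godigest.Digest values and validate them up front with Validate(), instead of parsing a raw string with godigest.Parse on every call. A minimal illustration of that calling convention; checkDigest is a hypothetical helper, not zot code.

// Editor's illustration only.
package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

// checkDigest mirrors the Validate()-first pattern used by the refactored
// FinishBlobUpload, FullBlobUpload, CheckBlob, GetBlob and DeleteBlob.
func checkDigest(digest godigest.Digest) error {
	return digest.Validate()
}

func main() {
	good := godigest.FromString("hello")
	fmt.Println(checkDigest(good)) // <nil>

	bad := godigest.Digest("sha256:not-a-valid-encoded-hash")
	fmt.Println(checkDigest(bad) != nil) // true: malformed digests are rejected up front
}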
@ -462,7 +462,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
err = stwr.Close()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = imgStore.FinishBlobUpload(testImage, upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload(testImage, upload, buf, digest)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -513,10 +513,10 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
_, err = imgStore.PutBlobChunk(testImage, upload, 0, int64(buflen), buf)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
err = imgStore.FinishBlobUpload(testImage, upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload(testImage, upload, buf, digest)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
err = imgStore.DeleteBlob(testImage, digest.String())
|
||||
err = imgStore.DeleteBlob(testImage, digest)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
err = imgStore.DeleteBlobUpload(testImage, upload)
|
||||
@ -534,7 +534,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
_, _, err = imgStore.FullBlobUpload(testImage, bytes.NewBuffer([]byte{}), "inexistent")
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
_, _, err = imgStore.CheckBlob(testImage, digest.String())
|
||||
_, _, err = imgStore.CheckBlob(testImage, digest)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -750,7 +750,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte("test"))
|
||||
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d.String())
|
||||
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -765,7 +765,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte("test"))
|
||||
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d.String())
|
||||
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -776,7 +776,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte("test"))
|
||||
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d.String())
|
||||
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -787,7 +787,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte(""))
|
||||
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d.String())
|
||||
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -798,14 +798,14 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte(""))
|
||||
_, _, err := imgStore.FullBlobUpload(testImage, io.NopCloser(strings.NewReader("")), d.String())
|
||||
_, _, err := imgStore.FullBlobUpload(testImage, io.NopCloser(strings.NewReader("")), d)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Test FullBlobUpload2", func(c C) {
|
||||
imgStore = createMockStorage(testDir, tdir, false, &StorageDriverMock{})
|
||||
d := godigest.FromBytes([]byte(" "))
|
||||
_, _, err := imgStore.FullBlobUpload(testImage, io.NopCloser(strings.NewReader("")), d.String())
|
||||
_, _, err := imgStore.FullBlobUpload(testImage, io.NopCloser(strings.NewReader("")), d)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -816,7 +816,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte(""))
|
||||
_, _, err := imgStore.FullBlobUpload(testImage, io.NopCloser(strings.NewReader("")), d.String())
|
||||
_, _, err := imgStore.FullBlobUpload(testImage, io.NopCloser(strings.NewReader("")), d)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -827,7 +827,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte(""))
|
||||
_, _, err := imgStore.GetBlob(testImage, d.String(), "")
|
||||
_, _, err := imgStore.GetBlob(testImage, d, "")
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -838,7 +838,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte(""))
|
||||
err := imgStore.DeleteBlob(testImage, d.String())
|
||||
err := imgStore.DeleteBlob(testImage, d)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -849,7 +849,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
|
||||
},
|
||||
})
|
||||
d := godigest.FromBytes([]byte(""))
|
||||
_, err := imgStore.GetReferrers(testImage, d.String(), "application/image")
|
||||
_, err := imgStore.GetReferrers(testImage, d, "application/image")
|
||||
So(err, ShouldNotBeNil)
|
||||
So(err, ShouldEqual, zerr.ErrMethodNotSupported)
|
||||
})
|
||||
@ -883,18 +883,18 @@ func TestS3Dedupe(t *testing.T) {
|
||||
blob, err := imgStore.PutBlobChunkStreamed("dedupe1", upload, buf)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
blobDigest1 := strings.Split(digest.String(), ":")[1]
|
||||
blobDigest1 := digest
|
||||
So(blobDigest1, ShouldNotBeEmpty)
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe1", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe1", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
_, checkBlobSize1, err := imgStore.CheckBlob("dedupe1", digest.String())
|
||||
_, checkBlobSize1, err := imgStore.CheckBlob("dedupe1", digest)
|
||||
So(checkBlobSize1, ShouldBeGreaterThan, 0)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
blobReadCloser, getBlobSize1, err := imgStore.GetBlob("dedupe1", digest.String(),
|
||||
blobReadCloser, getBlobSize1, err := imgStore.GetBlob("dedupe1", digest,
|
||||
"application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
So(getBlobSize1, ShouldBeGreaterThan, 0)
|
||||
So(err, ShouldBeNil)
|
||||
@ -902,10 +902,10 @@ func TestS3Dedupe(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
cblob, cdigest := test.GetRandomImageConfig()
|
||||
_, clen, err := imgStore.FullBlobUpload("dedupe1", bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err := imgStore.FullBlobUpload("dedupe1", bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err := imgStore.CheckBlob("dedupe1", cdigest.String())
|
||||
hasBlob, _, err := imgStore.CheckBlob("dedupe1", cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -948,18 +948,18 @@ func TestS3Dedupe(t *testing.T) {
|
||||
blob, err = imgStore.PutBlobChunkStreamed("dedupe2", upload, buf)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
blobDigest2 := strings.Split(digest.String(), ":")[1]
|
||||
blobDigest2 := digest
|
||||
So(blobDigest2, ShouldNotBeEmpty)
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
_, checkBlobSize2, err := imgStore.CheckBlob("dedupe2", digest.String())
|
||||
_, checkBlobSize2, err := imgStore.CheckBlob("dedupe2", digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(checkBlobSize2, ShouldBeGreaterThan, 0)
|
||||
|
||||
blobReadCloser, getBlobSize2, err := imgStore.GetBlob("dedupe2", digest.String(),
|
||||
blobReadCloser, getBlobSize2, err := imgStore.GetBlob("dedupe2", digest,
|
||||
"application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
So(err, ShouldBeNil)
|
||||
So(getBlobSize2, ShouldBeGreaterThan, 0)
|
||||
@ -969,10 +969,10 @@ func TestS3Dedupe(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
cblob, cdigest = test.GetRandomImageConfig()
|
||||
_, clen, err = imgStore.FullBlobUpload("dedupe2", bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err = imgStore.FullBlobUpload("dedupe2", bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err = imgStore.CheckBlob("dedupe2", cdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob("dedupe2", cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1001,10 +1001,12 @@ func TestS3Dedupe(t *testing.T) {
|
||||
_, _, _, err = imgStore.GetImageManifest("dedupe2", digest.String())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
fi1, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe1", "blobs", "sha256", blobDigest1))
|
||||
fi1, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe1", "blobs", "sha256",
|
||||
blobDigest1.Encoded()))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
fi2, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe2", "blobs", "sha256", blobDigest2))
|
||||
fi2, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe2", "blobs", "sha256",
|
||||
blobDigest2.Encoded()))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// original blob should have the real content of blob
|
||||
@ -1015,23 +1017,26 @@ func TestS3Dedupe(t *testing.T) {
|
||||
|
||||
Convey("Check that delete blobs moves the real content to the next contenders", func() {
|
||||
// if we delete blob1, the content should be moved to blob2
|
||||
err = imgStore.DeleteBlob("dedupe1", "sha256:"+blobDigest1)
|
||||
err = imgStore.DeleteBlob("dedupe1", blobDigest1)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, err = storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe1", "blobs", "sha256", blobDigest1))
|
||||
_, err = storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe1", "blobs", "sha256",
|
||||
blobDigest1.Encoded()))
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
fi2, err = storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe2", "blobs", "sha256", blobDigest2))
|
||||
fi2, err = storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe2", "blobs", "sha256",
|
||||
blobDigest2.Encoded()))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(fi2.Size(), ShouldBeGreaterThan, 0)
|
||||
// the second blob should now be equal to the deleted blob.
|
||||
So(fi2.Size(), ShouldEqual, fi1.Size())
|
||||
|
||||
err = imgStore.DeleteBlob("dedupe2", "sha256:"+blobDigest2)
|
||||
err = imgStore.DeleteBlob("dedupe2", blobDigest2)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, err = storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe2", "blobs", "sha256", blobDigest2))
|
||||
_, err = storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe2", "blobs", "sha256",
|
||||
blobDigest2.Encoded()))
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
@ -1062,43 +1067,43 @@ func TestS3Dedupe(t *testing.T) {
|
||||
blob, err = imgStore.PutBlobChunkStreamed("dedupe3", upload, buf)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
blobDigest2 := strings.Split(digest.String(), ":")[1]
|
||||
blobDigest2 := digest
|
||||
So(blobDigest2, ShouldNotBeEmpty)
|
||||
|
||||
err = imgStore.FinishBlobUpload("dedupe3", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dedupe3", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
_, _, err = imgStore.CheckBlob("dedupe3", digest.String())
|
||||
_, _, err = imgStore.CheckBlob("dedupe3", digest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// check that we retrieve the real dedupe2/blob (which is deduped earlier - 0 size) when switching to dedupe false
|
||||
blobReadCloser, getBlobSize2, err = imgStore.GetBlob("dedupe2", digest.String(),
|
||||
blobReadCloser, getBlobSize2, err = imgStore.GetBlob("dedupe2", digest,
|
||||
"application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
So(err, ShouldBeNil)
|
||||
So(getBlobSize1, ShouldEqual, getBlobSize2)
|
||||
err = blobReadCloser.Close()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
_, checkBlobSize2, err := imgStore.CheckBlob("dedupe2", digest.String())
|
||||
_, checkBlobSize2, err := imgStore.CheckBlob("dedupe2", digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(checkBlobSize2, ShouldBeGreaterThan, 0)
|
||||
So(checkBlobSize2, ShouldEqual, getBlobSize2)
|
||||
|
||||
_, getBlobSize3, err := imgStore.GetBlob("dedupe3", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
_, getBlobSize3, err := imgStore.GetBlob("dedupe3", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
So(err, ShouldBeNil)
|
||||
So(getBlobSize1, ShouldEqual, getBlobSize3)
|
||||
|
||||
_, checkBlobSize3, err := imgStore.CheckBlob("dedupe3", digest.String())
|
||||
_, checkBlobSize3, err := imgStore.CheckBlob("dedupe3", digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(checkBlobSize3, ShouldBeGreaterThan, 0)
|
||||
So(checkBlobSize3, ShouldEqual, getBlobSize3)
|
||||
|
||||
cblob, cdigest = test.GetRandomImageConfig()
|
||||
_, clen, err = imgStore.FullBlobUpload("dedupe3", bytes.NewReader(cblob), cdigest.String())
|
||||
_, clen, err = imgStore.FullBlobUpload("dedupe3", bytes.NewReader(cblob), cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(clen, ShouldEqual, len(cblob))
|
||||
hasBlob, _, err = imgStore.CheckBlob("dedupe3", cdigest.String())
|
||||
hasBlob, _, err = imgStore.CheckBlob("dedupe3", cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(hasBlob, ShouldEqual, true)
|
||||
|
||||
@ -1127,14 +1132,17 @@ func TestS3Dedupe(t *testing.T) {
|
||||
_, _, _, err = imgStore.GetImageManifest("dedupe3", digest.String())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
fi1, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe1", "blobs", "sha256", blobDigest1))
|
||||
fi1, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe1", "blobs", "sha256",
|
||||
blobDigest1.Encoded()))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
fi2, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe2", "blobs", "sha256", blobDigest1))
|
||||
fi2, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe2", "blobs", "sha256",
|
||||
blobDigest1.Encoded()))
|
||||
So(err, ShouldBeNil)
|
||||
So(fi2.Size(), ShouldEqual, 0)
|
||||
|
||||
fi3, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe3", "blobs", "sha256", blobDigest2))
|
||||
fi3, err := storeDriver.Stat(context.Background(), path.Join(testDir, "dedupe3", "blobs", "sha256",
|
||||
blobDigest2.Encoded()))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// the new blob with dedupe false should be equal with the origin blob from dedupe1
|
||||
@ -1171,54 +1179,54 @@ func TestS3PullRange(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload("index", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("index", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
Convey("Without Dedupe", func() {
|
||||
reader, _, _, err := imgStore.GetBlobPartial("index", digest.String(), "*/*", 0, -1)
|
||||
reader, _, _, err := imgStore.GetBlobPartial("index", digest, "*/*", 0, -1)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err := io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content)
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest.String(), "application/octet-stream", 0, -1)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest, "application/octet-stream", 0, -1)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content)
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest.String(), "*/*", 0, 100)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest, "*/*", 0, 100)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content)
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest.String(), "*/*", 0, 10)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest, "*/*", 0, 10)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content)
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest.String(), "*/*", 0, 0)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest, "*/*", 0, 0)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content[0:1])
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest.String(), "*/*", 0, 1)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest, "*/*", 0, 1)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content[0:2])
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest.String(), "*/*", 2, 3)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("index", digest, "*/*", 2, 3)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
@ -1241,53 +1249,53 @@ func TestS3PullRange(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload("dupindex", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("dupindex", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
reader, _, _, err := imgStore.GetBlobPartial("dupindex", digest.String(), "*/*", 0, -1)
|
||||
reader, _, _, err := imgStore.GetBlobPartial("dupindex", digest, "*/*", 0, -1)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err := io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content)
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest.String(), "application/octet-stream", 0, -1)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest, "application/octet-stream", 0, -1)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content)
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest.String(), "*/*", 0, 100)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest, "*/*", 0, 100)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content)
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest.String(), "*/*", 0, 10)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest, "*/*", 0, 10)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content)
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest.String(), "*/*", 0, 0)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest, "*/*", 0, 0)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content[0:1])
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest.String(), "*/*", 0, 1)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest, "*/*", 0, 1)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
So(rdbuf, ShouldResemble, content[0:2])
|
||||
reader.Close()
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest.String(), "*/*", 2, 3)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest, "*/*", 2, 3)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
@ -1295,10 +1303,10 @@ func TestS3PullRange(t *testing.T) {
|
||||
reader.Close()
|
||||
|
||||
// delete original blob
|
||||
err = imgStore.DeleteBlob("index", digest.String())
|
||||
err = imgStore.DeleteBlob("index", digest)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest.String(), "*/*", 2, 3)
|
||||
reader, _, _, err = imgStore.GetBlobPartial("dupindex", digest, "*/*", 2, 3)
|
||||
So(err, ShouldBeNil)
|
||||
rdbuf, err = io.ReadAll(reader)
|
||||
So(err, ShouldBeNil)
|
||||
@ -1313,7 +1321,7 @@ func TestS3PullRange(t *testing.T) {
|
||||
content := []byte("invalid content")
|
||||
digest := godigest.FromBytes(content)
|
||||
|
||||
_, _, _, err = imgStore.GetBlobPartial("index", digest.String(), "*/*", 0, -1)
|
||||
_, _, _, err = imgStore.GetBlobPartial("index", digest, "*/*", 0, -1)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
@ -1349,7 +1357,7 @@ func TestS3ManifestImageIndex(t *testing.T) {
|
||||
bdgst1 := digest
|
||||
bsize1 := len(content)
|
||||
|
||||
err = imgStore.FinishBlobUpload("index", upload, buf, digest.String())
|
||||
err = imgStore.FinishBlobUpload("index", upload, buf, digest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
@ -1365,7 +1373,7 @@ func TestS3ManifestImageIndex(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)
|
||||
|
||||
err = imgStore.FinishBlobUpload("index", upload, buf, cdigest.String())
|
||||
err = imgStore.FinishBlobUpload("index", upload, buf, cdigest)
|
||||
So(err, ShouldBeNil)
|
||||
So(blob, ShouldEqual, buflen)

@@ -1407,7 +1415,7 @@ func TestS3ManifestImageIndex(t *testing.T) {
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

err = imgStore.FinishBlobUpload("index", upload, buf, cdigest.String())
err = imgStore.FinishBlobUpload("index", upload, buf, cdigest)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

@@ -1449,7 +1457,7 @@ func TestS3ManifestImageIndex(t *testing.T) {
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

err = imgStore.FinishBlobUpload("index", upload, buf, cdigest.String())
err = imgStore.FinishBlobUpload("index", upload, buf, cdigest)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

@@ -1513,7 +1521,7 @@ func TestS3ManifestImageIndex(t *testing.T) {
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

err = imgStore.FinishBlobUpload("index", upload, buf, cdigest.String())
err = imgStore.FinishBlobUpload("index", upload, buf, cdigest)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

@@ -1663,7 +1671,7 @@ func TestS3ManifestImageIndex(t *testing.T) {
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

err = imgStore.FinishBlobUpload("index", upload, buf, digest.String())
err = imgStore.FinishBlobUpload("index", upload, buf, digest)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

@@ -1909,7 +1917,7 @@ func TestS3DedupeErr(t *testing.T) {
err := imgStore.DedupeBlob("repo", digest, "dst")
So(err, ShouldBeNil)

_, _, err = imgStore.CheckBlob("repo", digest.String())
_, _, err = imgStore.CheckBlob("repo", digest)
So(err, ShouldNotBeNil)
})

@@ -1930,7 +1938,7 @@ func TestS3DedupeErr(t *testing.T) {
err := imgStore.DedupeBlob("repo", digest, "dst")
So(err, ShouldBeNil)

_, _, err = imgStore.CheckBlob("repo", digest.String())
_, _, err = imgStore.CheckBlob("repo", digest)
So(err, ShouldNotBeNil)
})

@@ -1948,7 +1956,7 @@ func TestS3DedupeErr(t *testing.T) {
err := imgStore.DedupeBlob("repo", digest, "dst")
So(err, ShouldBeNil)

_, _, err = imgStore.CheckBlob("repo", digest.String())
_, _, err = imgStore.CheckBlob("repo", digest)
So(err, ShouldNotBeNil)
})

@@ -1985,10 +1993,10 @@ func TestS3DedupeErr(t *testing.T) {
},
})

_, _, err = imgStore.GetBlob("repo2", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
_, _, err = imgStore.GetBlob("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldNotBeNil)

_, _, _, err = imgStore.GetBlobPartial("repo2", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip", 0, 1)
_, _, _, err = imgStore.GetBlobPartial("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip", 0, 1)
So(err, ShouldNotBeNil)
})

@@ -2032,10 +2040,10 @@ func TestS3DedupeErr(t *testing.T) {
},
})

_, _, err = imgStore.GetBlob("repo2", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
_, _, err = imgStore.GetBlob("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldNotBeNil)

_, _, _, err = imgStore.GetBlobPartial("repo2", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip", 0, 1)
_, _, _, err = imgStore.GetBlobPartial("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip", 0, 1)
So(err, ShouldNotBeNil)
})

@@ -2055,10 +2063,10 @@ func TestS3DedupeErr(t *testing.T) {
},
})

_, _, err = imgStore.GetBlob("repo2", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
_, _, err = imgStore.GetBlob("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldNotBeNil)

_, _, _, err = imgStore.GetBlobPartial("repo2", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip", 0, 1)
_, _, _, err = imgStore.GetBlobPartial("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip", 0, 1)
So(err, ShouldNotBeNil)
})

@@ -2090,10 +2098,10 @@ func TestS3DedupeErr(t *testing.T) {
err := imgStore.DedupeBlob("repo", digest, blobPath)
So(err, ShouldBeNil)

_, _, err = imgStore.CheckBlob("repo2", digest.String())
_, _, err = imgStore.CheckBlob("repo2", digest)
So(err, ShouldBeNil)

err = imgStore.DeleteBlob("repo", digest.String())
err = imgStore.DeleteBlob("repo", digest)
So(err, ShouldNotBeNil)
})

@@ -2105,7 +2113,7 @@ func TestS3DedupeErr(t *testing.T) {
},
})
d := godigest.FromBytes([]byte(""))
_, _, err := imgStore.FullBlobUpload(testImage, io.NopCloser(strings.NewReader("")), d.String())
_, _, err := imgStore.FullBlobUpload(testImage, io.NopCloser(strings.NewReader("")), d)
So(err, ShouldNotBeNil)
})

@@ -2117,7 +2125,7 @@ func TestS3DedupeErr(t *testing.T) {
},
})
d := godigest.FromBytes([]byte(""))
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d.String())
err := imgStore.FinishBlobUpload(testImage, "uuid", io.NopCloser(strings.NewReader("")), d)
So(err, ShouldNotBeNil)
})
}
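The TestS3 hunks above stop stringifying digests before handing them to FinishBlobUpload and FullBlobUpload. For orientation, a minimal, self-contained sketch of the check an upload path ultimately has to perform against such a typed digest, using only the go-digest package; verifyUpload and the sample blob are illustrative and not code from zot:

package main

import (
	"bytes"
	"fmt"
	"io"

	godigest "github.com/opencontainers/go-digest"
)

// verifyUpload is a hypothetical stand-in for the work behind FinishBlobUpload:
// hash the uploaded body with the algorithm named in the expected digest and
// compare, now receiving a typed godigest.Digest rather than a raw string.
func verifyUpload(body io.Reader, expected godigest.Digest) error {
	digester := expected.Algorithm().Digester()
	if _, err := io.Copy(digester.Hash(), body); err != nil {
		return err
	}
	if actual := digester.Digest(); actual != expected {
		return fmt.Errorf("digest mismatch: got %s, want %s", actual, expected)
	}
	return nil
}

func main() {
	blob := []byte("this is a blob")
	dgst := godigest.FromBytes(blob) // typed digest, e.g. sha256:...
	if err := verifyUpload(bytes.NewReader(blob), dgst); err != nil {
		panic(err)
	}
	fmt.Println("verified", dgst.String())
}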
@@ -143,7 +143,7 @@ func checkIntegrity(ctx context.Context, imageName, tagName string, oci casext.E
}

// check layer
layerPath := path.Join(dir, "blobs", layer.Digest.Algorithm().String(), layer.Digest.Hex())
layerPath := path.Join(dir, "blobs", layer.Digest.Algorithm().String(), layer.Digest.Encoded())

_, err = os.Stat(layerPath)
if err != nil {
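The checkIntegrity hunk swaps the Hex() accessor for Encoded(); both return the bare hash, while Algorithm() supplies the prefix. A small sketch of how the two compose a blobs/<alg>/<hash> path, with an illustrative layout root:

package main

import (
	"fmt"
	"path"

	godigest "github.com/opencontainers/go-digest"
)

func main() {
	// dir is an illustrative OCI layout root, not a path taken from the diff.
	dir := "/var/lib/registry/repo"

	dgst := godigest.FromBytes([]byte("layer data"))

	// Encoded() returns only the hash part (same value Hex() used to return),
	// while Algorithm() gives the prefix, so the two compose a blob path.
	layerPath := path.Join(dir, "blobs", dgst.Algorithm().String(), dgst.Encoded())
	fmt.Println(layerPath) // .../blobs/sha256/<encoded hash>
}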
@@ -50,6 +50,9 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {

const tag = "1.0"

var manifestDigest godigest.Digest
var configDigest godigest.Digest
var layerDigest godigest.Digest
var manifest string
var config string
var layer string

@@ -59,10 +62,11 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
buf := bytes.NewBuffer(body)
buflen := buf.Len()
digest := godigest.FromBytes(body)
upload, n, err := imgStore.FullBlobUpload(repoName, buf, digest.String())
upload, n, err := imgStore.FullBlobUpload(repoName, buf, digest)
So(err, ShouldBeNil)
So(n, ShouldEqual, len(body))
So(upload, ShouldNotBeEmpty)
layerDigest = digest
layer = digest.String()

// create config digest

@@ -92,8 +96,8 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
}`, created, created, created))
configBuf := bytes.NewBuffer(configBody)
configLen := configBuf.Len()
configDigest := godigest.FromBytes(configBody)
uConfig, nConfig, err := imgStore.FullBlobUpload(repoName, configBuf, configDigest.String())
configDigest = godigest.FromBytes(configBody)
uConfig, nConfig, err := imgStore.FullBlobUpload(repoName, configBuf, configDigest)
So(err, ShouldBeNil)
So(nConfig, ShouldEqual, len(configBody))
So(uConfig, ShouldNotBeEmpty)

@@ -122,10 +126,12 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
mbytes, err := json.Marshal(mnfst)
So(err, ShouldBeNil)

manifest, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest,
manifestDigest, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest,
mbytes)
So(err, ShouldBeNil)

manifest = manifestDigest.String()

Convey("Blobs integrity not affected", func() {
buff := bytes.NewBufferString("")

@@ -171,7 +177,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {

Convey("Config integrity affected", func() {
// get content of config file
content, err := imgStore.GetBlobContent(repoName, config)
content, err := imgStore.GetBlobContent(repoName, configDigest)
So(err, ShouldBeNil)

// delete content of config file

@@ -199,7 +205,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {

Convey("Layers integrity affected", func() {
// get content of layer
content, err := imgStore.GetBlobContent(repoName, layer)
content, err := imgStore.GetBlobContent(repoName, layerDigest)
So(err, ShouldBeNil)

// delete content of layer file
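In TestCheckAllBlobsIntegrity, PutImageManifest now hands back a godigest.Digest and the test keeps a string copy only where one is needed. A short, self-contained sketch of that round trip between the typed and textual forms, using a placeholder manifest rather than the one built in the test:

package main

import (
	"encoding/json"
	"fmt"

	godigest "github.com/opencontainers/go-digest"
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A minimal manifest; the descriptor contents are placeholders, not values
	// from the test above.
	manifest := ispec.Manifest{
		Config: ispec.Descriptor{MediaType: ispec.MediaTypeImageConfig},
	}

	body, err := json.Marshal(manifest)
	if err != nil {
		panic(err)
	}

	// This mirrors what PutImageManifest now returns: a typed digest that
	// callers convert with String() only at the point they need text.
	manifestDigest := godigest.FromBytes(body)
	asString := manifestDigest.String()

	// Parse round-trips the string form back into the typed form.
	parsed, err := godigest.Parse(asString)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == manifestDigest) // true
}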
@@ -4,7 +4,7 @@ import (
"io"
"time"

"github.com/opencontainers/go-digest"
godigest "github.com/opencontainers/go-digest"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"

"zotregistry.io/zot/pkg/scheduler"

@@ -27,8 +27,8 @@ type ImageStore interface { //nolint:interfacebloat
GetRepositories() ([]string, error)
GetNextRepository(repo string) (string, error)
GetImageTags(repo string) ([]string, error)
GetImageManifest(repo, reference string) ([]byte, string, string, error)
PutImageManifest(repo, reference, mediaType string, body []byte) (string, error)
GetImageManifest(repo, reference string) ([]byte, godigest.Digest, string, error)
PutImageManifest(repo, reference, mediaType string, body []byte) (godigest.Digest, error)
DeleteImageManifest(repo, reference string) error
BlobUploadPath(repo, uuid string) string
NewBlobUpload(repo string) (string, error)

@@ -36,18 +36,19 @@ type ImageStore interface { //nolint:interfacebloat
PutBlobChunkStreamed(repo, uuid string, body io.Reader) (int64, error)
PutBlobChunk(repo, uuid string, from, to int64, body io.Reader) (int64, error)
BlobUploadInfo(repo, uuid string) (int64, error)
FinishBlobUpload(repo, uuid string, body io.Reader, digest string) error
FullBlobUpload(repo string, body io.Reader, digest string) (string, int64, error)
DedupeBlob(src string, dstDigest digest.Digest, dst string) error
FinishBlobUpload(repo, uuid string, body io.Reader, digest godigest.Digest) error
FullBlobUpload(repo string, body io.Reader, digest godigest.Digest) (string, int64, error)
DedupeBlob(src string, dstDigest godigest.Digest, dst string) error
DeleteBlobUpload(repo, uuid string) error
BlobPath(repo string, digest digest.Digest) string
CheckBlob(repo, digest string) (bool, int64, error)
GetBlob(repo, digest, mediaType string) (io.ReadCloser, int64, error)
GetBlobPartial(repo, digest, mediaType string, from, to int64) (io.ReadCloser, int64, int64, error)
DeleteBlob(repo, digest string) error
BlobPath(repo string, digest godigest.Digest) string
CheckBlob(repo string, digest godigest.Digest) (bool, int64, error)
GetBlob(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error)
GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,
) (io.ReadCloser, int64, int64, error)
DeleteBlob(repo string, digest godigest.Digest) error
GetIndexContent(repo string) ([]byte, error)
GetBlobContent(repo, digest string) ([]byte, error)
GetReferrers(repo, digest string, mediaType string) ([]artifactspec.Descriptor, error)
GetBlobContent(repo string, digest godigest.Digest) ([]byte, error)
GetReferrers(repo string, digest godigest.Digest, mediaType string) ([]artifactspec.Descriptor, error)
RunGCRepo(repo string) error
RunGCPeriodically(interval time.Duration, sch *scheduler.Scheduler)
}
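With the ImageStore interface above taking godigest.Digest everywhere, the string-to-digest conversion moves to the edges of the API. A hedged sketch of what a caller at that edge might look like; blobChecker, fakeStore and checkBlobFromRequest are stand-ins invented for the example, and only the CheckBlob signature mirrors the interface:

package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

// blobChecker is a cut-down stand-in for the ImageStore methods above; only
// the digest-typed signature matters for this sketch.
type blobChecker interface {
	CheckBlob(repo string, digest godigest.Digest) (bool, int64, error)
}

// checkBlobFromRequest shows the conversion boundary the refactor introduces:
// handlers still receive the digest as text, so they validate it with
// godigest.Parse once and pass the typed value down.
func checkBlobFromRequest(store blobChecker, repo, rawDigest string) (bool, error) {
	dgst, err := godigest.Parse(rawDigest)
	if err != nil {
		return false, fmt.Errorf("invalid digest %q: %w", rawDigest, err)
	}
	ok, _, err := store.CheckBlob(repo, dgst)
	return ok, err
}

type fakeStore struct{}

func (fakeStore) CheckBlob(repo string, digest godigest.Digest) (bool, int64, error) {
	return true, 0, nil
}

func main() {
	d := godigest.FromBytes([]byte("blob")).String()
	ok, err := checkBlobFromRequest(fakeStore{}, "test", d)
	fmt.Println(ok, err)
}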
@@ -170,7 +170,7 @@ func TestStorageAPIs(t *testing.T) {
body := []byte("this is a blob")
buf := bytes.NewBuffer(body)
digest := godigest.FromBytes(body)
upload, n, err := imgStore.FullBlobUpload("test", buf, digest.String())
upload, n, err := imgStore.FullBlobUpload("test", buf, digest)
So(err, ShouldBeNil)
So(n, ShouldEqual, len(body))
So(upload, ShouldNotBeEmpty)

@@ -238,13 +238,13 @@ func TestStorageAPIs(t *testing.T) {
So(err, ShouldBeNil)
So(bupload, ShouldEqual, secondChunkLen)

err = imgStore.FinishBlobUpload("test", upload, buf, digest.String())
err = imgStore.FinishBlobUpload("test", upload, buf, digest)
So(err, ShouldBeNil)

_, _, err = imgStore.CheckBlob("test", digest.String())
_, _, err = imgStore.CheckBlob("test", digest)
So(err, ShouldBeNil)

blob, _, err := imgStore.GetBlob("test", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
blob, _, err := imgStore.GetBlob("test", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
err = blob.Close()
So(err, ShouldBeNil)

@@ -280,10 +280,10 @@ func TestStorageAPIs(t *testing.T) {

Convey("Good image manifest", func() {
cblob, cdigest := test.GetRandomImageConfig()
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest.String())
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest)
So(err, ShouldBeNil)
So(clen, ShouldEqual, len(cblob))
hasBlob, _, err := imgStore.CheckBlob("test", cdigest.String())
hasBlob, _, err := imgStore.CheckBlob("test", cdigest)
So(err, ShouldBeNil)
So(hasBlob, ShouldEqual, true)

@@ -353,7 +353,7 @@ func TestStorageAPIs(t *testing.T) {
So(len(tags), ShouldEqual, 2)

// We deleted only one tag, make sure blob should not be removed.
hasBlob, _, err = imgStore.CheckBlob("test", digest.String())
hasBlob, _, err = imgStore.CheckBlob("test", digest)
So(err, ShouldBeNil)
So(hasBlob, ShouldEqual, true)

@@ -366,17 +366,17 @@ func TestStorageAPIs(t *testing.T) {
So(len(tags), ShouldEqual, 0)

// All tags/references are deleted, blob should not be present in disk.
hasBlob, _, err = imgStore.CheckBlob("test", digest.String())
hasBlob, _, err = imgStore.CheckBlob("test", digest)
So(err, ShouldNotBeNil)
So(hasBlob, ShouldEqual, false)

err = imgStore.DeleteBlob("test", "inexistent")
So(err, ShouldNotBeNil)

err = imgStore.DeleteBlob("test", godigest.FromBytes([]byte("inexistent")).String())
err = imgStore.DeleteBlob("test", godigest.FromBytes([]byte("inexistent")))
So(err, ShouldNotBeNil)

err = imgStore.DeleteBlob("test", blobDigest.String())
err = imgStore.DeleteBlob("test", blobDigest)
So(err, ShouldBeNil)

_, _, _, err = imgStore.GetImageManifest("test", digest.String())

@@ -394,9 +394,6 @@ func TestStorageAPIs(t *testing.T) {
So(bupload, ShouldNotBeEmpty)

Convey("Get blob upload", func() {
err = imgStore.FinishBlobUpload("test", bupload, bytes.NewBuffer([]byte{}), "inexistent")
So(err, ShouldNotBeNil)

upload, err := imgStore.GetBlobUpload("test", "invalid")
So(err, ShouldNotBeNil)
So(upload, ShouldEqual, -1)

@@ -423,28 +420,28 @@ func TestStorageAPIs(t *testing.T) {
_, err = imgStore.PutBlobChunkStreamed("test", "inexistent", buf)
So(err, ShouldNotBeNil)

err = imgStore.FinishBlobUpload("test", "inexistent", buf, digest.String())
err = imgStore.FinishBlobUpload("test", "inexistent", buf, digest)
So(err, ShouldNotBeNil)

err = imgStore.FinishBlobUpload("test", bupload, buf, digest.String())
err = imgStore.FinishBlobUpload("test", bupload, buf, digest)
So(err, ShouldBeNil)

_, _, err = imgStore.CheckBlob("test", digest.String())
_, _, err = imgStore.CheckBlob("test", digest)
So(err, ShouldBeNil)

_, _, err = imgStore.GetBlob("test", "inexistent", "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldNotBeNil)

blob, _, err := imgStore.GetBlob("test", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
blob, _, err := imgStore.GetBlob("test", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
err = blob.Close()
So(err, ShouldBeNil)

blobContent, err := imgStore.GetBlobContent("test", digest.String())
blobContent, err := imgStore.GetBlobContent("test", digest)
So(err, ShouldBeNil)
So(content, ShouldResemble, blobContent)

_, err = imgStore.GetBlobContent("inexistent", digest.String())
_, err = imgStore.GetBlobContent("inexistent", digest)
So(err, ShouldNotBeNil)

manifest := ispec.Manifest{}

@@ -475,10 +472,10 @@ func TestStorageAPIs(t *testing.T) {

Convey("Good image manifest", func() {
cblob, cdigest := test.GetRandomImageConfig()
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest.String())
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest)
So(err, ShouldBeNil)
So(clen, ShouldEqual, len(cblob))
hasBlob, _, err := imgStore.CheckBlob("test", cdigest.String())
hasBlob, _, err := imgStore.CheckBlob("test", cdigest)
So(err, ShouldBeNil)
So(hasBlob, ShouldEqual, true)

@@ -567,15 +564,15 @@ func TestStorageAPIs(t *testing.T) {
blobDigest1 := strings.Split(digest.String(), ":")[1]
So(blobDigest1, ShouldNotBeEmpty)

err = imgStore.FinishBlobUpload("replace", upload, buf, digest.String())
err = imgStore.FinishBlobUpload("replace", upload, buf, digest)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

cblob, cdigest := test.GetRandomImageConfig()
_, clen, err := imgStore.FullBlobUpload("replace", bytes.NewReader(cblob), cdigest.String())
_, clen, err := imgStore.FullBlobUpload("replace", bytes.NewReader(cblob), cdigest)
So(err, ShouldBeNil)
So(clen, ShouldEqual, len(cblob))
hasBlob, _, err := imgStore.CheckBlob("replace", cdigest.String())
hasBlob, _, err := imgStore.CheckBlob("replace", cdigest)
So(err, ShouldBeNil)
So(hasBlob, ShouldEqual, true)

@@ -619,15 +616,15 @@ func TestStorageAPIs(t *testing.T) {
blobDigest2 := strings.Split(digest.String(), ":")[1]
So(blobDigest2, ShouldNotBeEmpty)

err = imgStore.FinishBlobUpload("replace", upload, buf, digest.String())
err = imgStore.FinishBlobUpload("replace", upload, buf, digest)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)

cblob, cdigest = test.GetRandomImageConfig()
_, clen, err = imgStore.FullBlobUpload("replace", bytes.NewReader(cblob), cdigest.String())
_, clen, err = imgStore.FullBlobUpload("replace", bytes.NewReader(cblob), cdigest)
So(err, ShouldBeNil)
So(clen, ShouldEqual, len(cblob))
hasBlob, _, err = imgStore.CheckBlob("replace", cdigest.String())
hasBlob, _, err = imgStore.CheckBlob("replace", cdigest)
So(err, ShouldBeNil)
So(hasBlob, ShouldEqual, true)

@@ -728,11 +725,11 @@ func TestMandatoryAnnotations(t *testing.T) {
buflen := buf.Len()
digest := godigest.FromBytes(content)

_, _, err := imgStore.FullBlobUpload("test", bytes.NewReader(buf.Bytes()), digest.String())
_, _, err := imgStore.FullBlobUpload("test", bytes.NewReader(buf.Bytes()), digest)
So(err, ShouldBeNil)

cblob, cdigest := test.GetRandomImageConfig()
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest.String())
_, clen, err := imgStore.FullBlobUpload("test", bytes.NewReader(cblob), cdigest)
So(err, ShouldBeNil)
So(clen, ShouldEqual, len(cblob))
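Several of the TestStorageAPIs assertions above (digest comparisons, lookups of an inexistent digest) lean on the fact that godigest.Digest is a typed string. A tiny sketch of the properties the tests rely on, with throwaway payloads:

package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

func main() {
	// godigest.Digest is a string type, so typed values compare and key maps
	// directly; this is what lets the tests drop the .String() calls.
	blobDigest := godigest.FromBytes([]byte("this is a blob"))
	same := godigest.FromBytes([]byte("this is a blob"))
	other := godigest.FromBytes([]byte("inexistent"))

	seen := map[godigest.Digest]bool{blobDigest: true}

	fmt.Println(blobDigest == same) // true
	fmt.Println(seen[other])        // false
	fmt.Println(other.Validate())   // <nil>: well-formed even if no such blob exists
}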
@@ -11,7 +11,7 @@ import (
"testing"
"time"

"github.com/opencontainers/go-digest"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
. "github.com/smartystreets/goconvey/convey"

@@ -267,7 +267,7 @@ func TestUploadImage(t *testing.T) {
test.WaitTillServerReady(baseURL)

layerBlob := []byte("test")
layerBlobDigest := digest.FromBytes(layerBlob)
layerBlobDigest := godigest.FromBytes(layerBlob)
layerPath := path.Join(tempDir, "test", "blobs", "sha256")

if _, err := os.Stat(layerPath); os.IsNotExist(err) {
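The import hunk above renames the bare "github.com/opencontainers/go-digest" import to the godigest alias; one plausible reason is that the tests routinely name a local variable digest, which would otherwise shadow the package. A minimal illustration, assuming nothing beyond the alias itself:

package main

import (
	"fmt"

	// Importing under the godigest alias keeps the package usable even when a
	// local variable is called digest, as in the tests above.
	godigest "github.com/opencontainers/go-digest"
)

func main() {
	body := []byte("test")
	digest := godigest.FromBytes(body) // local name no longer shadows the package
	fmt.Println(digest.Algorithm(), digest.Encoded())
}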
@@ -4,7 +4,7 @@ import (
"io"
"time"

"github.com/opencontainers/go-digest"
godigest "github.com/opencontainers/go-digest"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"

"zotregistry.io/zot/pkg/scheduler"

@@ -18,8 +18,8 @@ type MockedImageStore struct {
GetRepositoriesFn func() ([]string, error)
GetNextRepositoryFn func(repo string) (string, error)
GetImageTagsFn func(repo string) ([]string, error)
GetImageManifestFn func(repo string, reference string) ([]byte, string, string, error)
PutImageManifestFn func(repo string, reference string, mediaType string, body []byte) (string, error)
GetImageManifestFn func(repo string, reference string) ([]byte, godigest.Digest, string, error)
PutImageManifestFn func(repo string, reference string, mediaType string, body []byte) (godigest.Digest, error)
DeleteImageManifestFn func(repo string, reference string) error
BlobUploadPathFn func(repo string, uuid string) string
NewBlobUploadFn func(repo string) (string, error)

@@ -27,19 +27,19 @@ type MockedImageStore struct {
BlobUploadInfoFn func(repo string, uuid string) (int64, error)
PutBlobChunkStreamedFn func(repo string, uuid string, body io.Reader) (int64, error)
PutBlobChunkFn func(repo string, uuid string, from int64, to int64, body io.Reader) (int64, error)
FinishBlobUploadFn func(repo string, uuid string, body io.Reader, digest string) error
FullBlobUploadFn func(repo string, body io.Reader, digest string) (string, int64, error)
DedupeBlobFn func(src string, dstDigest digest.Digest, dst string) error
FinishBlobUploadFn func(repo string, uuid string, body io.Reader, digest godigest.Digest) error
FullBlobUploadFn func(repo string, body io.Reader, digest godigest.Digest) (string, int64, error)
DedupeBlobFn func(src string, dstDigest godigest.Digest, dst string) error
DeleteBlobUploadFn func(repo string, uuid string) error
BlobPathFn func(repo string, digest digest.Digest) string
CheckBlobFn func(repo string, digest string) (bool, int64, error)
GetBlobPartialFn func(repo string, digest string, mediaType string, from, to int64,
BlobPathFn func(repo string, digest godigest.Digest) string
CheckBlobFn func(repo string, digest godigest.Digest) (bool, int64, error)
GetBlobPartialFn func(repo string, digest godigest.Digest, mediaType string, from, to int64,
) (io.ReadCloser, int64, int64, error)
GetBlobFn func(repo string, digest string, mediaType string) (io.ReadCloser, int64, error)
DeleteBlobFn func(repo string, digest string) error
GetBlobFn func(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error)
DeleteBlobFn func(repo string, digest godigest.Digest) error
GetIndexContentFn func(repo string) ([]byte, error)
GetBlobContentFn func(repo, digest string) ([]byte, error)
GetReferrersFn func(repo, digest string, mediaType string) ([]artifactspec.Descriptor, error)
GetBlobContentFn func(repo string, digest godigest.Digest) ([]byte, error)
GetReferrersFn func(repo string, digest godigest.Digest, mediaType string) ([]artifactspec.Descriptor, error)
URLForPathFn func(path string) (string, error)
RunGCRepoFn func(repo string) error
RunGCPeriodicallyFn func(interval time.Duration, sch *scheduler.Scheduler)

@@ -105,7 +105,7 @@ func (is MockedImageStore) GetNextRepository(repo string) (string, error) {
return "", nil
}

func (is MockedImageStore) GetImageManifest(repo string, reference string) ([]byte, string, string, error) {
func (is MockedImageStore) GetImageManifest(repo string, reference string) ([]byte, godigest.Digest, string, error) {
if is.GetImageManifestFn != nil {
return is.GetImageManifestFn(repo, reference)
}

@@ -118,7 +118,7 @@ func (is MockedImageStore) PutImageManifest(
reference string,
mediaType string,
body []byte,
) (string, error) {
) (godigest.Digest, error) {
if is.PutImageManifestFn != nil {
return is.PutImageManifestFn(repo, reference, mediaType, body)
}

@@ -196,7 +196,7 @@ func (is MockedImageStore) PutBlobChunk(
return 0, nil
}

func (is MockedImageStore) FinishBlobUpload(repo string, uuid string, body io.Reader, digest string) error {
func (is MockedImageStore) FinishBlobUpload(repo string, uuid string, body io.Reader, digest godigest.Digest) error {
if is.FinishBlobUploadFn != nil {
return is.FinishBlobUploadFn(repo, uuid, body, digest)
}

@@ -204,7 +204,7 @@ func (is MockedImageStore) FinishBlobUpload(repo string, uuid string, body io.Re
return nil
}

func (is MockedImageStore) FullBlobUpload(repo string, body io.Reader, digest string) (string, int64, error) {
func (is MockedImageStore) FullBlobUpload(repo string, body io.Reader, digest godigest.Digest) (string, int64, error) {
if is.FullBlobUploadFn != nil {
return is.FullBlobUploadFn(repo, body, digest)
}

@@ -212,7 +212,7 @@ func (is MockedImageStore) FullBlobUpload(repo string, body io.Reader, digest st
return "", 0, nil
}

func (is MockedImageStore) DedupeBlob(src string, dstDigest digest.Digest, dst string) error {
func (is MockedImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error {
if is.DedupeBlobFn != nil {
return is.DedupeBlobFn(src, dstDigest, dst)
}

@@ -220,7 +220,7 @@ func (is MockedImageStore) DedupeBlob(src string, dstDigest digest.Digest, dst s
return nil
}

func (is MockedImageStore) DeleteBlob(repo string, digest string) error {
func (is MockedImageStore) DeleteBlob(repo string, digest godigest.Digest) error {
if is.DeleteBlobFn != nil {
return is.DeleteBlobFn(repo, digest)
}

@@ -228,7 +228,7 @@ func (is MockedImageStore) DeleteBlob(repo string, digest string) error {
return nil
}

func (is MockedImageStore) BlobPath(repo string, digest digest.Digest) string {
func (is MockedImageStore) BlobPath(repo string, digest godigest.Digest) string {
if is.BlobPathFn != nil {
return is.BlobPathFn(repo, digest)
}

@@ -236,7 +236,7 @@ func (is MockedImageStore) BlobPath(repo string, digest digest.Digest) string {
return ""
}

func (is MockedImageStore) CheckBlob(repo string, digest string) (bool, int64, error) {
func (is MockedImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int64, error) {
if is.CheckBlobFn != nil {
return is.CheckBlobFn(repo, digest)
}

@@ -244,7 +244,7 @@ func (is MockedImageStore) CheckBlob(repo string, digest string) (bool, int64, e
return true, 0, nil
}

func (is MockedImageStore) GetBlobPartial(repo string, digest string, mediaType string, from, to int64,
func (is MockedImageStore) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,
) (io.ReadCloser, int64, int64, error) {
if is.GetBlobPartialFn != nil {
return is.GetBlobPartialFn(repo, digest, mediaType, from, to)

@@ -253,7 +253,8 @@ func (is MockedImageStore) GetBlobPartial(repo string, digest string, mediaType
return io.NopCloser(&io.LimitedReader{}), 0, 0, nil
}

func (is MockedImageStore) GetBlob(repo string, digest string, mediaType string) (io.ReadCloser, int64, error) {
func (is MockedImageStore) GetBlob(repo string, digest godigest.Digest, mediaType string,
) (io.ReadCloser, int64, error) {
if is.GetBlobFn != nil {
return is.GetBlobFn(repo, digest, mediaType)
}

@@ -261,9 +262,9 @@ func (is MockedImageStore) GetBlob(repo string, digest string, mediaType string)
return io.NopCloser(&io.LimitedReader{}), 0, nil
}

func (is MockedImageStore) DeleteBlobUpload(repo string, digest string) error {
func (is MockedImageStore) DeleteBlobUpload(repo string, uuid string) error {
if is.DeleteBlobUploadFn != nil {
return is.DeleteBlobUploadFn(repo, digest)
return is.DeleteBlobUploadFn(repo, uuid)
}

return nil

@@ -277,7 +278,7 @@ func (is MockedImageStore) GetIndexContent(repo string) ([]byte, error) {
return []byte{}, nil
}

func (is MockedImageStore) GetBlobContent(repo string, digest string) ([]byte, error) {
func (is MockedImageStore) GetBlobContent(repo string, digest godigest.Digest) ([]byte, error) {
if is.GetBlobContentFn != nil {
return is.GetBlobContentFn(repo, digest)
}

@@ -287,7 +288,7 @@ func (is MockedImageStore) GetBlobContent(repo string, digest string) ([]byte, e

func (is MockedImageStore) GetReferrers(
repo string,
digest string,
digest godigest.Digest,
mediaType string,
) ([]artifactspec.Descriptor, error) {
if is.GetReferrersFn != nil {
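MockedImageStore now exposes digest-typed function fields such as CheckBlobFn. A self-contained sketch of how a test can drive such a field; miniMock is a trimmed-down stand-in defined only for this example, not the real mock from the zot mocks package:

package main

import (
	"errors"
	"fmt"

	godigest "github.com/opencontainers/go-digest"
)

// miniMock mirrors the shape of MockedImageStore for a single method so the
// example stays self-contained.
type miniMock struct {
	CheckBlobFn func(repo string, digest godigest.Digest) (bool, int64, error)
}

func (m miniMock) CheckBlob(repo string, digest godigest.Digest) (bool, int64, error) {
	if m.CheckBlobFn != nil {
		return m.CheckBlobFn(repo, digest)
	}
	return true, 0, nil
}

func main() {
	mock := miniMock{
		CheckBlobFn: func(repo string, digest godigest.Digest) (bool, int64, error) {
			return false, 0, errors.New("blob not found")
		},
	}
	_, _, err := mock.CheckBlob("repo", godigest.FromBytes([]byte("blob")))
	fmt.Println(err)
}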
@@ -1,7 +1,6 @@
package mocks

import (
v1 "github.com/google/go-containerregistry/pkg/v1"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"

@@ -9,10 +8,10 @@ import (
)

type OciLayoutUtilsMock struct {
GetImageManifestFn func(repo string, reference string) (ispec.Manifest, string, error)
GetImageManifestsFn func(image string) ([]ispec.Descriptor, error)
GetImageBlobManifestFn func(imageDir string, digest godigest.Digest) (v1.Manifest, error)
GetImageInfoFn func(imageDir string, hash v1.Hash) (ispec.Image, error)
GetImageManifestFn func(repo string, reference string) (ispec.Manifest, godigest.Digest, error)
GetImageManifestsFn func(repo string) ([]ispec.Descriptor, error)
GetImageBlobManifestFn func(repo string, digest godigest.Digest) (ispec.Manifest, error)
GetImageInfoFn func(repo string, digest godigest.Digest) (ispec.Image, error)
GetImageTagsWithTimestampFn func(repo string) ([]common.TagInfo, error)
GetImagePlatformFn func(imageInfo ispec.Image) (string, string)
GetImageVendorFn func(imageInfo ispec.Image) string

@@ -25,7 +24,8 @@ type OciLayoutUtilsMock struct {
GetRepositoriesFn func() ([]string, error)
}

func (olum OciLayoutUtilsMock) GetImageManifest(repo string, reference string) (ispec.Manifest, string, error) {
func (olum OciLayoutUtilsMock) GetImageManifest(repo string, reference string,
) (ispec.Manifest, godigest.Digest, error) {
if olum.GetImageManifestFn != nil {
return olum.GetImageManifestFn(repo, reference)
}

@@ -41,25 +41,25 @@ func (olum OciLayoutUtilsMock) GetRepositories() ([]string, error) {
return []string{}, nil
}

func (olum OciLayoutUtilsMock) GetImageManifests(image string) ([]ispec.Descriptor, error) {
func (olum OciLayoutUtilsMock) GetImageManifests(repo string) ([]ispec.Descriptor, error) {
if olum.GetImageManifestsFn != nil {
return olum.GetImageManifestsFn(image)
return olum.GetImageManifestsFn(repo)
}

return []ispec.Descriptor{}, nil
}

func (olum OciLayoutUtilsMock) GetImageBlobManifest(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
func (olum OciLayoutUtilsMock) GetImageBlobManifest(repo string, digest godigest.Digest) (ispec.Manifest, error) {
if olum.GetImageBlobManifestFn != nil {
return olum.GetImageBlobManifestFn(imageDir, digest)
return olum.GetImageBlobManifestFn(repo, digest)
}

return v1.Manifest{}, nil
return ispec.Manifest{}, nil
}

func (olum OciLayoutUtilsMock) GetImageInfo(imageDir string, hash v1.Hash) (ispec.Image, error) {
func (olum OciLayoutUtilsMock) GetImageInfo(repo string, digest godigest.Digest) (ispec.Image, error) {
if olum.GetImageInfoFn != nil {
return olum.GetImageInfoFn(imageDir, hash)
return olum.GetImageInfoFn(repo, digest)
}

return ispec.Image{}, nil
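The OciLayoutUtilsMock changes above drop go-containerregistry's v1.Manifest and v1.Hash in favour of image-spec and go-digest types, whose descriptors already carry a godigest.Digest. A brief sketch with an illustrative config blob, not one taken from the mock:

package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// With the mock returning image-spec types, a manifest and the digest of
	// its config blob are both plain OCI values.
	configBlob := []byte(`{"architecture":"amd64","os":"linux"}`)
	configDigest := godigest.FromBytes(configBlob)

	manifest := ispec.Manifest{
		Config: ispec.Descriptor{
			MediaType: ispec.MediaTypeImageConfig,
			Digest:    configDigest,
			Size:      int64(len(configBlob)),
		},
	}
	fmt.Println(manifest.Config.Digest.Encoded())
}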