lint: upgrade golangci-lint

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>
This commit is contained in:
Ramkumar Chinchani 2021-12-13 19:23:31 +00:00 committed by Ravi Chamarthy
parent 5f04092e71
commit ac3801ea2d
71 changed files with 3038 additions and 2575 deletions

View File

@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
services:
s3mock:
image: localstack/localstack-full
image: ghcr.io/project-zot/localstack/localstack:0.13.2
env:
SERVICES: s3
ports:

43
.github/workflows/golangci-lint.yaml vendored Normal file
View File

@ -0,0 +1,43 @@
name: golangci-lint
on:
push:
tags:
- v*
branches:
- master
- main
pull_request:
permissions:
contents: read
# Optional: allow read access to pull request. Use with `only-new-issues` option.
# pull-requests: read
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: v1.43.0
# Optional: working directory, useful for monorepos
# working-directory: somedir
# Optional: golangci-lint command line arguments.
# args: --issues-exit-code=0
args: --config ./golangcilint.yaml --enable-all --build-tags extended,containers_image_openpgp ./cmd/... ./pkg/...
# Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true
# Optional: if set to true then the action will use pre-installed Go.
# skip-go-installation: true
# Optional: if set to true then the action don't cache or restore ~/go/pkg.
# skip-pkg-cache: true
# Optional: if set to true then the action don't cache or restore ~/.cache/go-build.
# skip-build-cache: true

View File

@ -1,7 +1,7 @@
# ---
# Stage 1: Install certs, build binary, create default config file
# ---
FROM docker.io/golang:1.16 AS builder
FROM ghcr.io/project-zot/golang:1.16 AS builder
ARG COMMIT
RUN mkdir -p /go/src/github.com/project-zot/zot
WORKDIR /go/src/github.com/project-zot/zot

View File

@ -1,7 +1,7 @@
# ---
# Stage 1: Install certs, build binary, create default config file
# ---
FROM docker.io/golang:1.16 AS builder
FROM ghcr.io/project-zot/golang:1.16 AS builder
ARG COMMIT
ARG ARCH
RUN mkdir -p /go/src/github.com/project-zot/zot

View File

@ -1,7 +1,7 @@
# ---
# Stage 1: Install certs, build binary, create default config file
# ---
FROM docker.io/golang:1.16 AS builder
FROM ghcr.io/project-zot/golang:1.16 AS builder
ARG COMMIT
ARG ARCH
RUN mkdir -p /go/src/github.com/project-zot/zot

View File

@ -1,14 +1,14 @@
# ---
# Stage 1: Install certs, build binary, create default config file
# ---
FROM docker.io/golang:1.16-alpine AS builder
FROM ghcr.io/project-zot/golang:1.16 AS builder
ARG COMMIT
RUN apk --update add git make ca-certificates
RUN apt-get update && apt-get install -y git make ca-certificates
RUN mkdir -p /go/src/github.com/project-zot/zot
WORKDIR /go/src/github.com/project-zot/zot
COPY . .
RUN make COMMIT=$COMMIT clean binary
RUN echo -e '# Default config file for zot server\n\
RUN echo '# Default config file for zot server\n\
http:\n\
address: 0.0.0.0\n\
port: 5000\n\

View File

@ -1,7 +1,7 @@
# ---
# Stage 1: Install certs, build binary, create default config file
# ---
FROM docker.io/golang:1.16 AS builder
FROM ghcr.io/project-zot/golang:1.16 AS builder
ARG COMMIT
RUN mkdir -p /go/src/github.com/project-zot/zot
WORKDIR /go/src/github.com/project-zot/zot

View File

@ -4,9 +4,11 @@ COMMIT_HASH=$(shell git describe --always --tags --long)
GO_VERSION=$(shell go version | awk '{print $$3}')
COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),$(COMMIT_HASH)-dirty,$(COMMIT_HASH))
CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
PATH := bin:$(PATH)
TMPDIR := $(shell mktemp -d)
TOOLSDIR := hack/tools
PATH := bin:$(TOOLSDIR)/bin:$(PATH)
STACKER := $(shell which stacker)
GOLINTER := $(TOOLSDIR)/bin/golangci-lint
OS ?= linux
ARCH ?= amd64
@ -55,11 +57,15 @@ covhtml:
cat coverage-extended.txt coverage-minimal.txt > coverage.txt
go tool cover -html=coverage.txt -o coverage.html
$(GOLINTER):
mkdir -p $(TOOLSDIR)/bin
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(TOOLSDIR)/bin v1.43.0
$(GOLINTER) version
.PHONY: check
check: ./golangcilint.yaml
golangci-lint --version || curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.26.0
golangci-lint --config ./golangcilint.yaml run --enable-all --build-tags extended,containers_image_openpgp ./...
golangci-lint --config ./golangcilint.yaml run --enable-all --build-tags minimal,containers_image_openpgp ./...
check: ./golangcilint.yaml $(GOLINTER)
$(GOLINTER) --config ./golangcilint.yaml run --enable-all --out-format=colored-line-number --build-tags minimal,containers_image_openpgp ./...
$(GOLINTER) --config ./golangcilint.yaml run --enable-all --out-format=colored-line-number --build-tags extended,containers_image_openpgp ./...
swagger/docs.go:
swag -v || go install github.com/swaggo/swag/cmd/swag
@ -76,6 +82,7 @@ update-licenses:
.PHONY: clean
clean:
rm -f bin/zot*
rm -rf hack
.PHONY: run
run: binary test

View File

@ -27,6 +27,7 @@ var (
ErrCacheMiss = errors.New("cache: miss")
ErrRequireCred = errors.New("ldap: bind credentials required")
ErrInvalidCred = errors.New("ldap: invalid credentials")
ErrEmptyJSON = errors.New("cli: config json is empty")
ErrInvalidArgs = errors.New("cli: Invalid Arguments")
ErrInvalidFlagsCombination = errors.New("cli: Invalid combination of flags")
ErrInvalidURL = errors.New("cli: invalid URL format")
@ -45,4 +46,5 @@ var (
ErrInvalidRepositoryName = errors.New("routes: not a repository name")
ErrSyncMissingCatalog = errors.New("sync: couldn't fetch upstream registry's catalog")
ErrMethodNotSupported = errors.New("storage: method not supported")
ErrInvalidMetric = errors.New("metrics: invalid metric func")
)

View File

@ -5,15 +5,33 @@ run:
linters:
enable-all: true
disable: funlen,godox,gocognit
output:
format: colored-line-number
disable: funlen,gocognit,exhaustivestruct,paralleltest,forbidigo,ireturn,wrapcheck,exhaustive
linters-settings:
dupl:
# tokens count to trigger issue, 150 by default
threshold: 200
nestif:
# there are various nested if/else blocks, therefore specifying complexity as 26
min-complexity: 26
cyclop:
max-complexity: 40
skip-tests: true
varnamelen:
check-return: true
ignore-type-assert-ok: true
ignore-map-index-ok: true
ignore-chan-recv-ok: true
ignore-names:
- err
- ok
- gc
gomnd:
settings:
mnd:
checks: argument,case,condition,operation,return,assign
ignored-numbers: 10,64
gomoddirectives:
replace-allow-list:
- github.com/aquasecurity/fanal
- github.com/aquasecurity/trivy
- github.com/aquasecurity/trivy-db
- github.com/containers/image/v5

View File

@ -31,46 +31,50 @@ func AuthHandler(c *Controller) mux.MiddlewareFunc {
return basicAuthHandler(c)
}
func bearerAuthHandler(c *Controller) mux.MiddlewareFunc {
func bearerAuthHandler(ctlr *Controller) mux.MiddlewareFunc {
authorizer, err := auth.NewAuthorizer(&auth.AuthorizerOptions{
Realm: c.Config.HTTP.Auth.Bearer.Realm,
Service: c.Config.HTTP.Auth.Bearer.Service,
PublicKeyPath: c.Config.HTTP.Auth.Bearer.Cert,
Realm: ctlr.Config.HTTP.Auth.Bearer.Realm,
Service: ctlr.Config.HTTP.Auth.Bearer.Service,
PublicKeyPath: ctlr.Config.HTTP.Auth.Bearer.Cert,
AccessEntryType: bearerAuthDefaultAccessEntryType,
EmptyDefaultNamespace: true,
})
if err != nil {
c.Log.Panic().Err(err).Msg("error creating bearer authorizer")
ctlr.Log.Panic().Err(err).Msg("error creating bearer authorizer")
}
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
name := vars["name"]
header := r.Header.Get("Authorization")
header := request.Header.Get("Authorization")
action := auth.PullAction
if m := r.Method; m != http.MethodGet && m != http.MethodHead {
if m := request.Method; m != http.MethodGet && m != http.MethodHead {
action = auth.PushAction
}
permissions, err := authorizer.Authorize(header, action, name)
if err != nil {
c.Log.Error().Err(err).Msg("issue parsing Authorization header")
w.Header().Set("Content-Type", "application/json")
WriteJSON(w, http.StatusInternalServerError, NewErrorList(NewError(UNSUPPORTED)))
ctlr.Log.Error().Err(err).Msg("issue parsing Authorization header")
response.Header().Set("Content-Type", "application/json")
WriteJSON(response, http.StatusInternalServerError, NewErrorList(NewError(UNSUPPORTED)))
return
}
if !permissions.Allowed {
authFail(w, permissions.WWWAuthenticateHeader, 0)
authFail(response, permissions.WWWAuthenticateHeader, 0)
return
}
next.ServeHTTP(w, r)
next.ServeHTTP(response, request)
})
}
}
// nolint:gocyclo // we use closure making this a complex subroutine
func basicAuthHandler(c *Controller) mux.MiddlewareFunc {
realm := c.Config.HTTP.Realm
func basicAuthHandler(ctlr *Controller) mux.MiddlewareFunc {
realm := ctlr.Config.HTTP.Realm
if realm == "" {
realm = "Authorization Required"
}
@ -78,55 +82,58 @@ func basicAuthHandler(c *Controller) mux.MiddlewareFunc {
realm = "Basic realm=" + strconv.Quote(realm)
// no password based authN, if neither LDAP nor HTTP BASIC is enabled
if c.Config.HTTP.Auth == nil || (c.Config.HTTP.Auth.HTPasswd.Path == "" && c.Config.HTTP.Auth.LDAP == nil) {
if ctlr.Config.HTTP.Auth == nil ||
(ctlr.Config.HTTP.Auth.HTPasswd.Path == "" && ctlr.Config.HTTP.Auth.LDAP == nil) {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if c.Config.HTTP.AllowReadAccess &&
c.Config.HTTP.TLS.CACert != "" &&
r.TLS.VerifiedChains == nil &&
r.Method != http.MethodGet && r.Method != http.MethodHead {
authFail(w, realm, 5)
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
if ctlr.Config.HTTP.AllowReadAccess &&
ctlr.Config.HTTP.TLS.CACert != "" &&
request.TLS.VerifiedChains == nil &&
request.Method != http.MethodGet && request.Method != http.MethodHead {
authFail(response, realm, 5) //nolint:gomnd
return
}
if (r.Method != http.MethodGet && r.Method != http.MethodHead) && c.Config.HTTP.ReadOnly {
if (request.Method != http.MethodGet && request.Method != http.MethodHead) && ctlr.Config.HTTP.ReadOnly {
// Reject modification requests in read-only mode
w.WriteHeader(http.StatusMethodNotAllowed)
response.WriteHeader(http.StatusMethodNotAllowed)
return
}
// Process request
next.ServeHTTP(w, r)
next.ServeHTTP(response, request)
})
}
}
credMap := make(map[string]string)
delay := c.Config.HTTP.Auth.FailDelay
delay := ctlr.Config.HTTP.Auth.FailDelay
var ldapClient *LDAPClient
if c.Config.HTTP.Auth != nil {
if c.Config.HTTP.Auth.LDAP != nil {
l := c.Config.HTTP.Auth.LDAP
if ctlr.Config.HTTP.Auth != nil {
if ctlr.Config.HTTP.Auth.LDAP != nil {
ldapConfig := ctlr.Config.HTTP.Auth.LDAP
ldapClient = &LDAPClient{
Host: l.Address,
Port: l.Port,
UseSSL: !l.Insecure,
SkipTLS: !l.StartTLS,
Base: l.BaseDN,
BindDN: l.BindDN,
BindPassword: l.BindPassword,
UserFilter: fmt.Sprintf("(%s=%%s)", l.UserAttribute),
InsecureSkipVerify: l.SkipVerify,
ServerName: l.Address,
Log: c.Log,
SubtreeSearch: l.SubtreeSearch,
Host: ldapConfig.Address,
Port: ldapConfig.Port,
UseSSL: !ldapConfig.Insecure,
SkipTLS: !ldapConfig.StartTLS,
Base: ldapConfig.BaseDN,
BindDN: ldapConfig.BindDN,
BindPassword: ldapConfig.BindPassword,
UserFilter: fmt.Sprintf("(%s=%%s)", ldapConfig.UserAttribute),
InsecureSkipVerify: ldapConfig.SkipVerify,
ServerName: ldapConfig.Address,
Log: ctlr.Log,
SubtreeSearch: ldapConfig.SubtreeSearch,
}
if c.Config.HTTP.Auth.LDAP.CACert != "" {
caCert, err := ioutil.ReadFile(c.Config.HTTP.Auth.LDAP.CACert)
if ctlr.Config.HTTP.Auth.LDAP.CACert != "" {
caCert, err := ioutil.ReadFile(ctlr.Config.HTTP.Auth.LDAP.CACert)
if err != nil {
panic(err)
}
@ -141,7 +148,6 @@ func basicAuthHandler(c *Controller) mux.MiddlewareFunc {
} else {
// default to system cert pool
caCertPool, err := x509.SystemCertPool()
if err != nil {
panic(errors.ErrBadCACert)
}
@ -150,15 +156,14 @@ func basicAuthHandler(c *Controller) mux.MiddlewareFunc {
}
}
if c.Config.HTTP.Auth.HTPasswd.Path != "" {
f, err := os.Open(c.Config.HTTP.Auth.HTPasswd.Path)
if ctlr.Config.HTTP.Auth.HTPasswd.Path != "" {
credsFile, err := os.Open(ctlr.Config.HTTP.Auth.HTPasswd.Path)
if err != nil {
panic(err)
}
defer f.Close()
defer credsFile.Close()
scanner := bufio.NewScanner(f)
scanner := bufio.NewScanner(credsFile)
for scanner.Scan() {
line := scanner.Text()
@ -171,42 +176,48 @@ func basicAuthHandler(c *Controller) mux.MiddlewareFunc {
}
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if (r.Method == http.MethodGet || r.Method == http.MethodHead) && c.Config.HTTP.AllowReadAccess {
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
if (request.Method == http.MethodGet || request.Method == http.MethodHead) && ctlr.Config.HTTP.AllowReadAccess {
// Process request
next.ServeHTTP(w, r)
next.ServeHTTP(response, request)
return
}
if (r.Method != http.MethodGet && r.Method != http.MethodHead) && c.Config.HTTP.ReadOnly {
if (request.Method != http.MethodGet && request.Method != http.MethodHead) && ctlr.Config.HTTP.ReadOnly {
// Reject modification requests in read-only mode
w.WriteHeader(http.StatusMethodNotAllowed)
response.WriteHeader(http.StatusMethodNotAllowed)
return
}
basicAuth := r.Header.Get("Authorization")
basicAuth := request.Header.Get("Authorization")
if basicAuth == "" {
authFail(w, realm, delay)
authFail(response, realm, delay)
return
}
s := strings.SplitN(basicAuth, " ", 2)
splitStr := strings.SplitN(basicAuth, " ", 2) //nolint:gomnd
if len(splitStr) != 2 || strings.ToLower(splitStr[0]) != "basic" {
authFail(response, realm, delay)
if len(s) != 2 || strings.ToLower(s[0]) != "basic" {
authFail(w, realm, delay)
return
}
b, err := base64.StdEncoding.DecodeString(s[1])
decodedStr, err := base64.StdEncoding.DecodeString(splitStr[1])
if err != nil {
authFail(w, realm, delay)
authFail(response, realm, delay)
return
}
pair := strings.SplitN(string(b), ":", 2)
pair := strings.SplitN(string(decodedStr), ":", 2) //nolint:gomnd
// nolint:gomnd
if len(pair) != 2 {
authFail(w, realm, delay)
authFail(response, realm, delay)
return
}
@ -218,22 +229,24 @@ func basicAuthHandler(c *Controller) mux.MiddlewareFunc {
if ok {
if err := bcrypt.CompareHashAndPassword([]byte(passphraseHash), []byte(passphrase)); err == nil {
// Process request
next.ServeHTTP(w, r)
next.ServeHTTP(response, request)
return
}
}
// next, LDAP if configured (network-based which can lose connectivity)
if c.Config.HTTP.Auth != nil && c.Config.HTTP.Auth.LDAP != nil {
if ctlr.Config.HTTP.Auth != nil && ctlr.Config.HTTP.Auth.LDAP != nil {
ok, _, err := ldapClient.Authenticate(username, passphrase)
if ok && err == nil {
// Process request
next.ServeHTTP(w, r)
next.ServeHTTP(response, request)
return
}
}
authFail(w, realm, delay)
authFail(response, realm, delay)
})
}
}

View File

@ -69,7 +69,7 @@ func (ac *AccessController) can(username, action, repository string) bool {
can = isPermitted(username, action, pg)
}
//check admins based policy
// check admins based policy
if !can {
if ac.isAdmin(username) && contains(ac.Config.AdminPolicy.Actions, action) {
can = true
@ -85,7 +85,7 @@ func (ac *AccessController) isAdmin(username string) bool {
}
// getContext builds ac context(allowed to read repos and if user is admin) and returns it.
func (ac *AccessController) getContext(username string, r *http.Request) context.Context {
func (ac *AccessController) getContext(username string, request *http.Request) context.Context {
userAllowedRepos := ac.getReadRepos(username)
acCtx := AccessControlContext{userAllowedRepos: userAllowedRepos}
@ -95,25 +95,26 @@ func (ac *AccessController) getContext(username string, r *http.Request) context
acCtx.isAdmin = false
}
ctx := context.WithValue(r.Context(), authzCtxKey, acCtx)
ctx := context.WithValue(request.Context(), authzCtxKey, acCtx)
return ctx
}
// isPermitted returns true if username can do action on a repository policy.
func isPermitted(username, action string, pg config.PolicyGroup) bool {
func isPermitted(username, action string, policyGroup config.PolicyGroup) bool {
var result bool
// check repo/system based policies
for _, p := range pg.Policies {
for _, p := range policyGroup.Policies {
if contains(p.Users, username) && contains(p.Actions, action) {
result = true
break
}
}
// check defaultPolicy
if !result {
if contains(pg.DefaultPolicy, action) {
if contains(policyGroup.DefaultPolicy, action) {
result = true
}
}
@ -141,33 +142,34 @@ func containsRepo(slice []string, item string) bool {
return false
}
func AuthzHandler(c *Controller) mux.MiddlewareFunc {
func AuthzHandler(ctlr *Controller) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
vars := mux.Vars(request)
resource := vars["name"]
reference, ok := vars["reference"]
ac := NewAccessController(c.Config)
username := getUsername(r)
ctx := ac.getContext(username, r)
acCtrlr := NewAccessController(ctlr.Config)
username := getUsername(request)
ctx := acCtrlr.getContext(username, request)
if request.RequestURI == "/v2/_catalog" || request.RequestURI == "/v2/" {
next.ServeHTTP(response, request.WithContext(ctx))
if r.RequestURI == "/v2/_catalog" || r.RequestURI == "/v2/" {
next.ServeHTTP(w, r.WithContext(ctx))
return
}
var action string
if r.Method == http.MethodGet || r.Method == http.MethodHead {
if request.Method == http.MethodGet || request.Method == http.MethodHead {
action = READ
}
if r.Method == http.MethodPut || r.Method == http.MethodPatch || r.Method == http.MethodPost {
if request.Method == http.MethodPut || request.Method == http.MethodPatch || request.Method == http.MethodPost {
// assume user wants to create
action = CREATE
// if we get a reference (tag)
if ok {
is := c.StoreController.GetImageStore(resource)
is := ctlr.StoreController.GetImageStore(resource)
tags, err := is.GetImageTags(resource)
// if repo exists and request's tag doesn't exist yet then action is UPDATE
if err == nil && contains(tags, reference) && reference != "latest" {
@ -176,15 +178,15 @@ func AuthzHandler(c *Controller) mux.MiddlewareFunc {
}
}
if r.Method == http.MethodDelete {
if request.Method == http.MethodDelete {
action = DELETE
}
can := ac.can(username, action, resource)
can := acCtrlr.can(username, action, resource)
if !can {
authzFail(w, c.Config.HTTP.Realm, c.Config.HTTP.Auth.FailDelay)
authzFail(response, ctlr.Config.HTTP.Realm, ctlr.Config.HTTP.Auth.FailDelay)
} else {
next.ServeHTTP(w, r.WithContext(ctx))
next.ServeHTTP(response, request.WithContext(ctx))
}
})
}
@ -193,9 +195,9 @@ func AuthzHandler(c *Controller) mux.MiddlewareFunc {
func getUsername(r *http.Request) string {
// this should work because it worked in auth middleware
basicAuth := r.Header.Get("Authorization")
s := strings.SplitN(basicAuth, " ", 2)
s := strings.SplitN(basicAuth, " ", 2) //nolint:gomnd
b, _ := base64.StdEncoding.DecodeString(s[1])
pair := strings.SplitN(string(b), ":", 2)
pair := strings.SplitN(string(b), ":", 2) //nolint:gomnd
return pair[0]
}

View File

@ -129,22 +129,22 @@ func New() *Config {
// Sanitize makes a sanitized copy of the config removing any secrets.
func (c *Config) Sanitize() *Config {
s := &Config{}
if err := deepcopy.Copy(s, c); err != nil {
sanitizedConfig := &Config{}
if err := deepcopy.Copy(sanitizedConfig, c); err != nil {
panic(err)
}
if c.HTTP.Auth != nil && c.HTTP.Auth.LDAP != nil && c.HTTP.Auth.LDAP.BindPassword != "" {
s.HTTP.Auth.LDAP = &LDAPConfig{}
sanitizedConfig.HTTP.Auth.LDAP = &LDAPConfig{}
if err := deepcopy.Copy(s.HTTP.Auth.LDAP, c.HTTP.Auth.LDAP); err != nil {
if err := deepcopy.Copy(sanitizedConfig.HTTP.Auth.LDAP, c.HTTP.Auth.LDAP); err != nil {
panic(err)
}
s.HTTP.Auth.LDAP.BindPassword = "******"
sanitizedConfig.HTTP.Auth.LDAP.BindPassword = "******"
}
return s
return sanitizedConfig
}
func (c *Config) Validate(log log.Logger) error {
@ -153,6 +153,7 @@ func (c *Config) Validate(log log.Logger) error {
l := c.HTTP.Auth.LDAP
if l.UserAttribute == "" {
log.Error().Str("userAttribute", l.UserAttribute).Msg("invalid LDAP configuration")
return errors.ErrLDAPConfig
}
}
@ -169,12 +170,12 @@ func (c *Config) LoadAccessControlConfig() error {
c.AccessControl = &AccessControlConfig{}
c.AccessControl.Repositories = make(map[string]PolicyGroup)
for k := range c.HTTP.RawAccessControl {
for policy := range c.HTTP.RawAccessControl {
var policies []Policy
var policyGroup PolicyGroup
if k == "adminpolicy" {
if policy == "adminpolicy" {
adminPolicy := viper.GetStringMapStringSlice("http.accessControl.adminPolicy")
c.AccessControl.AdminPolicy.Actions = adminPolicy["actions"]
c.AccessControl.AdminPolicy.Users = adminPolicy["users"]
@ -182,15 +183,15 @@ func (c *Config) LoadAccessControlConfig() error {
continue
}
err := viper.UnmarshalKey(fmt.Sprintf("http.accessControl.%s.policies", k), &policies)
err := viper.UnmarshalKey(fmt.Sprintf("http.accessControl.%s.policies", policy), &policies)
if err != nil {
return err
}
defaultPolicy := viper.GetStringSlice(fmt.Sprintf("http.accessControl.%s.defaultPolicy", k))
defaultPolicy := viper.GetStringSlice(fmt.Sprintf("http.accessControl.%s.defaultPolicy", policy))
policyGroup.Policies = policies
policyGroup.DefaultPolicy = defaultPolicy
c.AccessControl.Repositories[k] = policyGroup
c.AccessControl.Repositories[policy] = policyGroup
}
return nil

View File

@ -11,6 +11,7 @@ import (
goSync "sync"
"time"
"github.com/docker/distribution/registry/storage/driver/factory"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"zotregistry.io/zot/errors"
@ -20,8 +21,6 @@ import (
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/s3"
"github.com/docker/distribution/registry/storage/driver/factory"
)
const (
@ -58,13 +57,13 @@ func NewController(config *config.Config) *Controller {
func DefaultHeaders() mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
// CORS
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
response.Header().Set("Access-Control-Allow-Origin", "*")
response.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
// handle the request
next.ServeHTTP(w, r)
next.ServeHTTP(response, request)
})
}
}
@ -73,6 +72,7 @@ func (c *Controller) Run() error {
// validate configuration
if err := c.Config.Validate(c.Log); err != nil {
c.Log.Error().Err(err).Msg("configuration validation failed")
return err
}
@ -118,7 +118,7 @@ func (c *Controller) Run() error {
c.Server = server
// Create the listener
l, err := net.Listen("tcp", addr)
listener, err := net.Listen("tcp", addr)
if err != nil {
return err
}
@ -147,13 +147,13 @@ func (c *Controller) Run() error {
PreferServerCipherSuites: true,
MinVersion: tls.VersionTLS12,
}
server.TLSConfig.BuildNameToCertificate() // nolint: staticcheck
server.TLSConfig.BuildNameToCertificate()
}
return server.ServeTLS(l, c.Config.HTTP.TLS.Cert, c.Config.HTTP.TLS.Key)
return server.ServeTLS(listener, c.Config.HTTP.TLS.Cert, c.Config.HTTP.TLS.Key)
}
return server.Serve(l)
return server.Serve(listener)
}
func (c *Controller) InitImageStore() error {
@ -184,6 +184,7 @@ func (c *Controller) InitImageStore() error {
store, err := factory.Create(storeName, c.Config.Storage.StorageDriver)
if err != nil {
c.Log.Error().Err(err).Str("rootDir", c.Config.Storage.RootDirectory).Msg("unable to create s3 service")
return err
}
@ -235,6 +236,7 @@ func (c *Controller) InitImageStore() error {
store, err := factory.Create(storeName, storageConfig.StorageDriver)
if err != nil {
c.Log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("Unable to create s3 service")
return err
}

File diff suppressed because it is too large Load Diff

View File

@ -17,7 +17,7 @@ type ErrorList struct {
type ErrorCode int
// nolint: golint, stylecheck
// nolint: golint, stylecheck, revive
const (
BLOB_UNKNOWN ErrorCode = iota
BLOB_UPLOAD_INVALID
@ -37,7 +37,7 @@ const (
)
func (e ErrorCode) String() string {
m := map[ErrorCode]string{
errMap := map[ErrorCode]string{
BLOB_UNKNOWN: "BLOB_UNKNOWN",
BLOB_UPLOAD_INVALID: "BLOB_UPLOAD_INVALID",
BLOB_UPLOAD_UNKNOWN: "BLOB_UPLOAD_UNKNOWN",
@ -55,11 +55,11 @@ func (e ErrorCode) String() string {
UNSUPPORTED: "UNSUPPORTED",
}
return m[e]
return errMap[e]
}
func NewError(code ErrorCode, detail ...interface{}) Error { //nolint: interfacer
var errMap = map[ErrorCode]Error{
errMap := map[ErrorCode]Error{
BLOB_UNKNOWN: {
Message: "blob unknown to registry",
Description: "blob unknown to registry This error MAY be returned when a blob is unknown " +
@ -154,25 +154,25 @@ func NewError(code ErrorCode, detail ...interface{}) Error { //nolint: interface
},
}
e, ok := errMap[code]
err, ok := errMap[code]
if !ok {
panic(errors.ErrUnknownCode)
}
e.Code = code.String()
e.Detail = detail
err.Code = code.String()
err.Detail = detail
return e
return err
}
func NewErrorList(errors ...Error) ErrorList {
el := make([]*Error, 0)
er := Error{}
errList := make([]*Error, 0)
err := Error{}
for _, e := range errors {
er = e
el = append(el, &er)
err = e
errList = append(errList, &err)
}
return ErrorList{el}
return ErrorList{errList}
}

View File

@ -3,17 +3,14 @@
package api
import (
"sync"
"time"
"crypto/tls"
"crypto/x509"
"fmt"
goldap "github.com/go-ldap/ldap/v3"
"zotregistry.io/zot/errors"
"sync"
"time"
"github.com/go-ldap/ldap/v3"
"zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/log"
)
@ -41,16 +38,17 @@ type LDAPClient struct {
// Connect connects to the ldap backend.
func (lc *LDAPClient) Connect() error {
if lc.Conn == nil {
var l *goldap.Conn
var l *ldap.Conn
var err error
address := fmt.Sprintf("%s:%d", lc.Host, lc.Port)
if !lc.UseSSL {
l, err = goldap.Dial("tcp", address)
l, err = ldap.Dial("tcp", address)
if err != nil {
lc.Log.Error().Err(err).Str("address", address).Msg("non-TLS connection failed")
return err
}
@ -60,15 +58,17 @@ func (lc *LDAPClient) Connect() error {
InsecureSkipVerify: lc.InsecureSkipVerify, // nolint: gosec // InsecureSkipVerify is not true by default
RootCAs: lc.ClientCAs,
}
if lc.ClientCertificates != nil && len(lc.ClientCertificates) > 0 {
config.Certificates = lc.ClientCertificates
config.BuildNameToCertificate() // nolint: staticcheck
config.BuildNameToCertificate()
}
err = l.StartTLS(config)
if err != nil {
lc.Log.Error().Err(err).Str("address", address).Msg("TLS connection failed")
return err
}
}
@ -80,11 +80,12 @@ func (lc *LDAPClient) Connect() error {
}
if lc.ClientCertificates != nil && len(lc.ClientCertificates) > 0 {
config.Certificates = lc.ClientCertificates
config.BuildNameToCertificate() // nolint: staticcheck
config.BuildNameToCertificate()
}
l, err = goldap.DialTLS("tcp", address, config)
l, err = ldap.DialTLS("tcp", address, config)
if err != nil {
lc.Log.Error().Err(err).Str("address", address).Msg("TLS connection failed")
return err
}
}
@ -112,6 +113,7 @@ func sleepAndRetry(retries, maxRetries int) bool {
if retries < maxRetries {
time.Sleep(time.Duration(retries) * time.Second) // gradually backoff
return true
}
@ -155,25 +157,27 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
// exhausted all retries?
if !connected {
lc.Log.Error().Err(errors.ErrLDAPBadConn).Msg("exhausted all retries")
return false, nil, errors.ErrLDAPBadConn
}
attributes := append(lc.Attributes, "dn")
searchScope := goldap.ScopeSingleLevel
attributes := lc.Attributes
attributes = append(attributes, "dn")
searchScope := ldap.ScopeSingleLevel
if lc.SubtreeSearch {
searchScope = goldap.ScopeWholeSubtree
searchScope = ldap.ScopeWholeSubtree
}
// Search for the given username
searchRequest := goldap.NewSearchRequest(
searchRequest := ldap.NewSearchRequest(
lc.Base,
searchScope, goldap.NeverDerefAliases, 0, 0, false,
searchScope, ldap.NeverDerefAliases, 0, 0, false,
fmt.Sprintf(lc.UserFilter, username),
attributes,
nil,
)
sr, err := lc.Conn.Search(searchRequest)
search, err := lc.Conn.Search(searchRequest)
if err != nil {
fmt.Printf("%v\n", err)
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Str("username", username).
@ -182,7 +186,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
return false, nil, err
}
if len(sr.Entries) < 1 {
if len(search.Entries) < 1 {
err := errors.ErrBadUser
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Str("username", username).
Str("baseDN", lc.Base).Msg("entries not found")
@ -190,7 +194,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
return false, nil, err
}
if len(sr.Entries) > 1 {
if len(search.Entries) > 1 {
err := errors.ErrEntriesExceeded
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Str("username", username).
Str("baseDN", lc.Base).Msg("too many entries")
@ -198,17 +202,18 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
return false, nil, err
}
userDN := sr.Entries[0].DN
userDN := search.Entries[0].DN
user := map[string]string{}
for _, attr := range lc.Attributes {
user[attr] = sr.Entries[0].GetAttributeValue(attr)
user[attr] = search.Entries[0].GetAttributeValue(attr)
}
// Bind as the user to verify their password
err = lc.Conn.Bind(userDN, password)
if err != nil {
lc.Log.Error().Err(err).Str("bindDN", userDN).Msg("user bind failed")
return false, user, err
}

View File

@ -29,19 +29,19 @@ var (
)
// match compiles the string to a regular expression.
// nolint (gochecknoglobals)
// nolint: gochecknoglobals
var match = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
regx := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
if _, complete := regx.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
return regx
}
// expression defines a full expression, where each regular expression must

File diff suppressed because it is too large Load Diff

View File

@ -35,20 +35,20 @@ func (w *statusWriter) Write(b []byte) (int, error) {
}
// SessionLogger logs session details.
func SessionLogger(c *Controller) mux.MiddlewareFunc {
l := c.Log.With().Str("module", "http").Logger()
func SessionLogger(ctlr *Controller) mux.MiddlewareFunc {
logger := ctlr.Log.With().Str("module", "http").Logger()
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
// Start timer
start := time.Now()
path := r.URL.Path
raw := r.URL.RawQuery
path := request.URL.Path
raw := request.URL.RawQuery
sw := statusWriter{ResponseWriter: w}
stwr := statusWriter{ResponseWriter: response}
// Process request
next.ServeHTTP(&sw, r)
next.ServeHTTP(&stwr, request)
// Stop timer
end := time.Now()
@ -57,22 +57,20 @@ func SessionLogger(c *Controller) mux.MiddlewareFunc {
// Truncate in a golang < 1.8 safe way
latency -= latency % time.Second
}
clientIP := r.RemoteAddr
method := r.Method
clientIP := request.RemoteAddr
method := request.Method
headers := map[string][]string{}
username := ""
log := l.Info()
for key, value := range r.Header {
log := logger.Info()
for key, value := range request.Header {
if key == "Authorization" { // anonymize from logs
s := strings.SplitN(value[0], " ", 2)
s := strings.SplitN(value[0], " ", 2) //nolint:gomnd
if len(s) == 2 && strings.EqualFold(s[0], "basic") {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
pair := strings.SplitN(string(b), ":", 2)
pair := strings.SplitN(string(b), ":", 2) //nolint:gomnd
// nolint:gomnd
if len(pair) == 2 {
username = pair[0]
log = log.Str("username", username)
log = log.Str("username", pair[0])
}
}
}
@ -80,8 +78,8 @@ func SessionLogger(c *Controller) mux.MiddlewareFunc {
}
headers[key] = value
}
statusCode := sw.status
bodySize := sw.length
statusCode := stwr.status
bodySize := stwr.length
if raw != "" {
path = path + "?" + raw
}
@ -89,9 +87,9 @@ func SessionLogger(c *Controller) mux.MiddlewareFunc {
if path != "/v2/metrics" {
// In order to test metrics feture,the instrumentation related to node exporter
// should be handled by node exporter itself (ex: latency)
monitoring.IncHTTPConnRequests(c.Metrics, method, strconv.Itoa(statusCode))
monitoring.ObserveHTTPRepoLatency(c.Metrics, path, latency) // summary
monitoring.ObserveHTTPMethodLatency(c.Metrics, method, latency) // histogram
monitoring.IncHTTPConnRequests(ctlr.Metrics, method, strconv.Itoa(statusCode))
monitoring.ObserveHTTPRepoLatency(ctlr.Metrics, path, latency) // summary
monitoring.ObserveHTTPMethodLatency(ctlr.Metrics, method, latency) // histogram
}
log.Str("clientIP", clientIP).
@ -108,28 +106,27 @@ func SessionLogger(c *Controller) mux.MiddlewareFunc {
func SessionAuditLogger(audit *log.Logger) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
raw := r.URL.RawQuery
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
path := request.URL.Path
raw := request.URL.RawQuery
sw := statusWriter{ResponseWriter: w}
statusWr := statusWriter{ResponseWriter: response}
// Process request
next.ServeHTTP(&sw, r)
next.ServeHTTP(&statusWr, request)
clientIP := r.RemoteAddr
method := r.Method
clientIP := request.RemoteAddr
method := request.Method
username := ""
for key, value := range r.Header {
for key, value := range request.Header {
if key == "Authorization" { // anonymize from logs
s := strings.SplitN(value[0], " ", 2)
s := strings.SplitN(value[0], " ", 2) //nolint:gomnd
if len(s) == 2 && strings.EqualFold(s[0], "basic") {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
pair := strings.SplitN(string(b), ":", 2)
// nolint:gomnd
if len(pair) == 2 {
pair := strings.SplitN(string(b), ":", 2) //nolint:gomnd
if len(pair) == 2 { //nolint:gomnd
username = pair[0]
}
}
@ -137,7 +134,7 @@ func SessionAuditLogger(audit *log.Logger) mux.MiddlewareFunc {
}
}
statusCode := sw.status
statusCode := statusWr.status
if raw != "" {
path = path + "?" + raw
}

View File

@ -23,8 +23,10 @@ import (
"zotregistry.io/zot/pkg/storage"
)
var httpClientsMap = make(map[string]*http.Client) //nolint: gochecknoglobals
var httpClientLock sync.Mutex //nolint: gochecknoglobals
var (
httpClientsMap = make(map[string]*http.Client) //nolint: gochecknoglobals
httpClientLock sync.Mutex //nolint: gochecknoglobals
)
const (
httpTimeout = 5 * time.Minute
@ -36,13 +38,13 @@ const (
)
func createHTTPClient(verifyTLS bool, host string) *http.Client {
var tr = http.DefaultTransport.(*http.Transport).Clone()
htr := http.DefaultTransport.(*http.Transport).Clone()
if !verifyTLS {
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint: gosec
htr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint: gosec
return &http.Client{
Timeout: httpTimeout,
Transport: tr,
Transport: htr,
}
}
@ -51,20 +53,20 @@ func createHTTPClient(verifyTLS bool, host string) *http.Client {
tlsConfig := loadPerHostCerts(caCertPool, host)
if tlsConfig == nil {
tlsConfig = &tls.Config{RootCAs: caCertPool}
tlsConfig = &tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12}
}
tr = &http.Transport{TLSClientConfig: tlsConfig}
htr = &http.Transport{TLSClientConfig: tlsConfig}
return &http.Client{
Timeout: httpTimeout,
Transport: tr,
Transport: htr,
}
}
func makeGETRequest(url, username, password string, verifyTLS bool, resultsPtr interface{}) (http.Header, error) {
req, err := http.NewRequest("GET", url, nil)
func makeGETRequest(ctx context.Context, url, username, password string,
verifyTLS bool, resultsPtr interface{}) (http.Header, error) {
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, err
}
@ -74,9 +76,9 @@ func makeGETRequest(url, username, password string, verifyTLS bool, resultsPtr i
return doHTTPRequest(req, verifyTLS, resultsPtr)
}
func makeGraphQLRequest(url, query, username,
func makeGraphQLRequest(ctx context.Context, url, query, username,
password string, verifyTLS bool, resultsPtr interface{}) error {
req, err := http.NewRequest("GET", url, bytes.NewBufferString(query))
req, err := http.NewRequestWithContext(ctx, "GET", url, bytes.NewBufferString(query))
if err != nil {
return err
}
@ -184,18 +186,20 @@ func getTLSConfig(certsPath string, caCertPool *x509.CertPool) (*tls.Config, err
return &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: caCertPool,
MinVersion: tls.VersionTLS12,
}, nil
}
func isURL(str string) bool {
u, err := url.Parse(str)
return err == nil && u.Scheme != "" && u.Host != ""
} // from https://stackoverflow.com/a/55551215
type requestsPool struct {
jobs chan *manifestJob
done chan struct{}
waitGroup *sync.WaitGroup
wtgrp *sync.WaitGroup
outputCh chan stringResult
context context.Context
}
@ -212,14 +216,14 @@ type manifestJob struct {
const rateLimiterBuffer = 5000
func newSmoothRateLimiter(ctx context.Context, wg *sync.WaitGroup, op chan stringResult) *requestsPool {
func newSmoothRateLimiter(ctx context.Context, wtgrp *sync.WaitGroup, opch chan stringResult) *requestsPool {
ch := make(chan *manifestJob, rateLimiterBuffer)
return &requestsPool{
jobs: ch,
done: make(chan struct{}),
waitGroup: wg,
outputCh: op,
wtgrp: wtgrp,
outputCh: opch,
context: ctx,
}
}
@ -227,15 +231,15 @@ func newSmoothRateLimiter(ctx context.Context, wg *sync.WaitGroup, op chan strin
// block every "rateLimit" time duration.
const rateLimit = 100 * time.Millisecond
func (p *requestsPool) startRateLimiter() {
p.waitGroup.Done()
func (p *requestsPool) startRateLimiter(ctx context.Context) {
p.wtgrp.Done()
throttle := time.NewTicker(rateLimit).C
for {
select {
case job := <-p.jobs:
go p.doJob(job)
go p.doJob(ctx, job)
case <-p.done:
return
}
@ -243,12 +247,13 @@ func (p *requestsPool) startRateLimiter() {
}
}
func (p *requestsPool) doJob(job *manifestJob) {
defer p.waitGroup.Done()
func (p *requestsPool) doJob(ctx context.Context, job *manifestJob) {
defer p.wtgrp.Done()
header, err := makeGETRequest(job.url, job.username, job.password, *job.config.verifyTLS, &job.manifestResp)
header, err := makeGETRequest(ctx, job.url, job.username, job.password,
*job.config.verifyTLS, &job.manifestResp)
if err != nil {
if isContextDone(p.context) {
if isContextDone(ctx) {
return
}
p.outputCh <- stringResult{"", err}
@ -291,7 +296,7 @@ func (p *requestsPool) doJob(job *manifestJob) {
str, err := image.string(*job.config.outputFormat)
if err != nil {
if isContextDone(p.context) {
if isContextDone(ctx) {
return
}
p.outputCh <- stringResult{"", err}
@ -299,7 +304,7 @@ func (p *requestsPool) doJob(job *manifestJob) {
return
}
if isContextDone(p.context) {
if isContextDone(ctx) {
return
}

View File

@ -49,7 +49,7 @@ func TestTLSWithAuth(t *testing.T) {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool})
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
defer func() { resty.SetTLSClientConfig(nil) }()
conf := config.New()
conf.HTTP.Port = SecurePort1
@ -68,16 +68,16 @@ func TestTLSWithAuth(t *testing.T) {
CACert: CACert,
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
c.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.RootDirectory = dir
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -93,7 +93,7 @@ func TestTLSWithAuth(t *testing.T) {
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Test with htpassw auth", func() {
@ -155,7 +155,7 @@ func TestTLSWithoutAuth(t *testing.T) {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool})
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
defer func() { resty.SetTLSClientConfig(nil) }()
conf := config.New()
conf.HTTP.Port = SecurePort1
@ -165,16 +165,16 @@ func TestTLSWithoutAuth(t *testing.T) {
CACert: CACert,
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
c.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.RootDirectory = dir
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -190,7 +190,7 @@ func TestTLSWithoutAuth(t *testing.T) {
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Certs in user's home", func() {
@ -223,7 +223,7 @@ func TestTLSWithoutAuth(t *testing.T) {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool})
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
defer func() { resty.SetTLSClientConfig(nil) }()
conf := config.New()
conf.HTTP.Port = SecurePort2
@ -233,16 +233,16 @@ func TestTLSWithoutAuth(t *testing.T) {
CACert: CACert,
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
c.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.RootDirectory = dir
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -258,7 +258,7 @@ func TestTLSWithoutAuth(t *testing.T) {
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Certs in privileged path", func() {
@ -286,7 +286,7 @@ func TestTLSBadCerts(t *testing.T) {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool})
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
defer func() { resty.SetTLSClientConfig(nil) }()
conf := config.New()
conf.HTTP.Port = SecurePort3
@ -296,16 +296,16 @@ func TestTLSBadCerts(t *testing.T) {
CACert: CACert,
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
c.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.RootDirectory = dir
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -321,7 +321,7 @@ func TestTLSBadCerts(t *testing.T) {
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Test with system certs", func() {

View File

@ -14,10 +14,13 @@ import (
"text/tabwriter"
jsoniter "github.com/json-iterator/go"
zotErrors "zotregistry.io/zot/errors"
"github.com/spf13/cobra"
zerr "zotregistry.io/zot/errors"
)
const (
defaultConfigPerms = 0o644
defaultFilePerms = 0o600
)
func NewConfigCommand() *cobra.Command {
@ -25,7 +28,7 @@ func NewConfigCommand() *cobra.Command {
var isReset bool
var configCmd = &cobra.Command{
configCmd := &cobra.Command{
Use: "config <config-name> [variable] [value]",
Example: examples,
Short: "Configure zot CLI",
@ -51,7 +54,7 @@ func NewConfigCommand() *cobra.Command {
return nil
}
return zotErrors.ErrInvalidArgs
return zerr.ErrInvalidArgs
case oneArg:
// zot config <name> -l
if isListing {
@ -65,7 +68,7 @@ func NewConfigCommand() *cobra.Command {
return nil
}
return zotErrors.ErrInvalidArgs
return zerr.ErrInvalidArgs
case twoArgs:
if isReset { // zot config <name> <key> --reset
return resetConfigValue(configPath, args[0], args[1])
@ -77,13 +80,13 @@ func NewConfigCommand() *cobra.Command {
}
fmt.Fprintln(cmd.OutOrStdout(), res)
case threeArgs:
//zot config <name> <key> <value>
// zot config <name> <key> <value>
if err := setConfigValue(configPath, args[0], args[1], args[2]); err != nil {
return err
}
default:
return zotErrors.ErrInvalidArgs
return zerr.ErrInvalidArgs
}
return nil
@ -99,7 +102,7 @@ func NewConfigCommand() *cobra.Command {
}
func NewConfigAddCommand() *cobra.Command {
var configAddCmd = &cobra.Command{
configAddCmd := &cobra.Command{
Use: "add <config-name> <url>",
Short: "Add configuration for a zot URL",
Long: `Configure CLI for interaction with a zot server`,
@ -125,7 +128,7 @@ func NewConfigAddCommand() *cobra.Command {
}
func getConfigMapFromFile(filePath string) ([]interface{}, error) {
file, err := os.OpenFile(filePath, os.O_RDONLY|os.O_CREATE, 0644)
file, err := os.OpenFile(filePath, os.O_RDONLY|os.O_CREATE, defaultConfigPerms)
if err != nil {
return nil, err
}
@ -139,29 +142,29 @@ func getConfigMapFromFile(filePath string) ([]interface{}, error) {
var jsonMap map[string]interface{}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
_ = json.Unmarshal(data, &jsonMap)
if jsonMap["configs"] == nil {
return nil, ErrEmptyJSON
return nil, zerr.ErrEmptyJSON
}
return jsonMap["configs"].([]interface{}), nil
}
func saveConfigMapToFile(filePath string, configMap []interface{}) error {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
listMap := make(map[string]interface{})
listMap["configs"] = configMap
marshalled, err := json.Marshal(&listMap)
marshalled, err := json.Marshal(&listMap)
if err != nil {
return err
}
if err := ioutil.WriteFile(filePath, marshalled, 0600); err != nil {
if err := ioutil.WriteFile(filePath, marshalled, defaultFilePerms); err != nil {
return err
}
@ -171,7 +174,7 @@ func saveConfigMapToFile(filePath string, configMap []interface{}) error {
func getConfigNames(configPath string) (string, error) {
configs, err := getConfigMapFromFile(configPath)
if err != nil {
if errors.Is(err, ErrEmptyJSON) {
if errors.Is(err, zerr.ErrEmptyJSON) {
return "", nil
}
@ -180,10 +183,14 @@ func getConfigNames(configPath string) (string, error) {
var builder strings.Builder
writer := tabwriter.NewWriter(&builder, 0, 8, 1, '\t', tabwriter.AlignRight)
writer := tabwriter.NewWriter(&builder, 0, 8, 1, '\t', tabwriter.AlignRight) //nolint:gomnd
for _, val := range configs {
configMap := val.(map[string]interface{})
configMap, ok := val.(map[string]interface{})
if !ok {
return "", zerr.ErrBadConfig
}
fmt.Fprintf(writer, "%s\t%s\n", configMap[nameKey], configMap["url"])
}
@ -197,16 +204,16 @@ func getConfigNames(configPath string) (string, error) {
func addConfig(configPath, configName, url string) error {
configs, err := getConfigMapFromFile(configPath)
if err != nil && !errors.Is(err, ErrEmptyJSON) {
if err != nil && !errors.Is(err, zerr.ErrEmptyJSON) {
return err
}
if !isURL(url) {
return zotErrors.ErrInvalidURL
return zerr.ErrInvalidURL
}
if configNameExists(configs, configName) {
return zotErrors.ErrDuplicateConfigName
return zerr.ErrDuplicateConfigName
}
configMap := make(map[string]interface{})
@ -236,15 +243,19 @@ func addDefaultConfigs(config map[string]interface{}) {
func getConfigValue(configPath, configName, key string) (string, error) {
configs, err := getConfigMapFromFile(configPath)
if err != nil {
if errors.Is(err, ErrEmptyJSON) {
return "", zotErrors.ErrConfigNotFound
if errors.Is(err, zerr.ErrEmptyJSON) {
return "", zerr.ErrConfigNotFound
}
return "", err
}
for _, val := range configs {
configMap := val.(map[string]interface{})
configMap, ok := val.(map[string]interface{})
if !ok {
return "", zerr.ErrBadConfig
}
addDefaultConfigs(configMap)
name := configMap[nameKey]
@ -257,25 +268,29 @@ func getConfigValue(configPath, configName, key string) (string, error) {
}
}
return "", zotErrors.ErrConfigNotFound
return "", zerr.ErrConfigNotFound
}
func resetConfigValue(configPath, configName, key string) error {
if key == "url" || key == nameKey {
return zotErrors.ErrCannotResetConfigKey
return zerr.ErrCannotResetConfigKey
}
configs, err := getConfigMapFromFile(configPath)
if err != nil {
if errors.Is(err, ErrEmptyJSON) {
return zotErrors.ErrConfigNotFound
if errors.Is(err, zerr.ErrEmptyJSON) {
return zerr.ErrConfigNotFound
}
return err
}
for _, val := range configs {
configMap := val.(map[string]interface{})
configMap, ok := val.(map[string]interface{})
if !ok {
return zerr.ErrBadConfig
}
addDefaultConfigs(configMap)
name := configMap[nameKey]
@ -291,25 +306,29 @@ func resetConfigValue(configPath, configName, key string) error {
}
}
return zotErrors.ErrConfigNotFound
return zerr.ErrConfigNotFound
}
func setConfigValue(configPath, configName, key, value string) error {
if key == nameKey {
return zotErrors.ErrIllegalConfigKey
return zerr.ErrIllegalConfigKey
}
configs, err := getConfigMapFromFile(configPath)
if err != nil {
if errors.Is(err, ErrEmptyJSON) {
return zotErrors.ErrConfigNotFound
if errors.Is(err, zerr.ErrEmptyJSON) {
return zerr.ErrConfigNotFound
}
return err
}
for _, val := range configs {
configMap := val.(map[string]interface{})
configMap, ok := val.(map[string]interface{})
if !ok {
return zerr.ErrBadConfig
}
addDefaultConfigs(configMap)
name := configMap[nameKey]
@ -330,13 +349,13 @@ func setConfigValue(configPath, configName, key, value string) error {
}
}
return zotErrors.ErrConfigNotFound
return zerr.ErrConfigNotFound
}
func getAllConfig(configPath, configName string) (string, error) {
configs, err := getConfigMapFromFile(configPath)
if err != nil {
if errors.Is(err, ErrEmptyJSON) {
if errors.Is(err, zerr.ErrEmptyJSON) {
return "", nil
}
@ -346,7 +365,11 @@ func getAllConfig(configPath, configName string) (string, error) {
var builder strings.Builder
for _, value := range configs {
configMap := value.(map[string]interface{})
configMap, ok := value.(map[string]interface{})
if !ok {
return "", zerr.ErrBadConfig
}
addDefaultConfigs(configMap)
name := configMap[nameKey]
@ -363,12 +386,16 @@ func getAllConfig(configPath, configName string) (string, error) {
}
}
return "", zotErrors.ErrConfigNotFound
return "", zerr.ErrConfigNotFound
}
func configNameExists(configs []interface{}, configName string) bool {
for _, val := range configs {
configMap := val.(map[string]interface{})
configMap, ok := val.(map[string]interface{})
if !ok {
return false
}
if configMap[nameKey] == configName {
return true
}
@ -399,7 +426,3 @@ Useful variables:
showspinnerConfig = "showspinner"
verifyTLSConfig = "verify-tls"
)
var (
ErrEmptyJSON = errors.New("cli: config json is empty")
)

View File

@ -11,9 +11,8 @@ import (
"strings"
"testing"
zotErrors "zotregistry.io/zot/errors"
. "github.com/smartystreets/goconvey/convey"
zotErrors "zotregistry.io/zot/errors"
)
func TestConfigCmdBasics(t *testing.T) {

View File

@ -8,10 +8,9 @@ import (
"os"
"path"
zotErrors "zotregistry.io/zot/errors"
"github.com/briandowns/spinner"
"github.com/spf13/cobra"
zotErrors "zotregistry.io/zot/errors"
)
func NewCveCommand(searchService SearchService) *cobra.Command {
@ -21,7 +20,7 @@ func NewCveCommand(searchService SearchService) *cobra.Command {
var isSpinner, verifyTLS, fixedFlag, verbose bool
var cveCmd = &cobra.Command{
cveCmd := &cobra.Command{
Use: "cve [config-name]",
Short: "Lookup CVEs in images hosted on zot",
Long: `List CVEs (Common Vulnerabilities and Exposures) of images hosted on a zot instance`,
@ -37,11 +36,14 @@ func NewCveCommand(searchService SearchService) *cobra.Command {
urlFromConfig, err := getConfigValue(configPath, args[0], "url")
if err != nil {
cmd.SilenceUsage = true
return err
}
if urlFromConfig == "" {
return zotErrors.ErrNoURLProvided
}
servURL = urlFromConfig
} else {
return zotErrors.ErrNoURLProvided
@ -53,11 +55,14 @@ func NewCveCommand(searchService SearchService) *cobra.Command {
isSpinner, err = parseBooleanConfig(configPath, args[0], showspinnerConfig)
if err != nil {
cmd.SilenceUsage = true
return err
}
verifyTLS, err = parseBooleanConfig(configPath, args[0], verifyTLSConfig)
if err != nil {
cmd.SilenceUsage = true
return err
}
}
@ -84,6 +89,7 @@ func NewCveCommand(searchService SearchService) *cobra.Command {
if err != nil {
cmd.SilenceUsage = true
return err
}
@ -106,7 +112,7 @@ func NewCveCommand(searchService SearchService) *cobra.Command {
func setupCveFlags(cveCmd *cobra.Command, variables cveFlagVariables) {
variables.searchCveParams["imageName"] = cveCmd.Flags().StringP("image", "I", "", "List CVEs by IMAGENAME[:TAG]")
variables.searchCveParams["cveID"] = cveCmd.Flags().StringP("cve-id", "i", "", "List images affected by a CVE")
variables.searchCveParams["cvid"] = cveCmd.Flags().StringP("cve-id", "i", "", "List images affected by a CVE")
cveCmd.Flags().StringVar(variables.servURL, "url", "", "Specify zot server URL if config-name is not mentioned")
cveCmd.Flags().StringVarP(variables.user, "user", "u", "", `User Credentials of `+

View File

@ -290,7 +290,7 @@ func TestServerCVEResponse(t *testing.T) {
url := GetBaseURL(port)
conf := config.New()
conf.HTTP.Port = port
c := api.NewController(conf)
ctlr := api.NewController(conf)
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
@ -304,7 +304,7 @@ func TestServerCVEResponse(t *testing.T) {
defer os.RemoveAll(dir)
c.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.RootDirectory = dir
cveConfig := &extconf.CVEConfig{
UpdateInterval: 2,
}
@ -312,7 +312,7 @@ func TestServerCVEResponse(t *testing.T) {
CVE: cveConfig,
Enable: true,
}
c.Config.Extensions = &extconf.ExtensionConfig{
ctlr.Config.Extensions = &extconf.ExtensionConfig{
Search: searchConfig,
}
@ -321,7 +321,7 @@ func TestServerCVEResponse(t *testing.T) {
if err := controller.Run(); err != nil {
return
}
}(c)
}(ctlr)
// wait till ready
for {
res, err := resty.R().Get(url + "/query")
@ -336,7 +336,7 @@ func TestServerCVEResponse(t *testing.T) {
defer func(controller *api.Controller) {
ctx := context.Background()
_ = controller.Server.Shutdown(ctx)
}(c)
}(ctlr)
Convey("Test CVE by image name", t, func() {
args := []string{"cvetest", "--image", "zot-cve-test:0.0.1"}

View File

@ -21,7 +21,7 @@ func NewImageCommand(searchService SearchService) *cobra.Command {
var isSpinner, verifyTLS, verbose bool
var imageCmd = &cobra.Command{
imageCmd := &cobra.Command{
Use: "images [config-name]",
Short: "List hosted images",
Long: `List images hosted on zot`,
@ -37,11 +37,14 @@ func NewImageCommand(searchService SearchService) *cobra.Command {
urlFromConfig, err := getConfigValue(configPath, args[0], "url")
if err != nil {
cmd.SilenceUsage = true
return err
}
if urlFromConfig == "" {
return zotErrors.ErrNoURLProvided
}
servURL = urlFromConfig
} else {
return zotErrors.ErrNoURLProvided
@ -53,11 +56,14 @@ func NewImageCommand(searchService SearchService) *cobra.Command {
isSpinner, err = parseBooleanConfig(configPath, args[0], showspinnerConfig)
if err != nil {
cmd.SilenceUsage = true
return err
}
verifyTLS, err = parseBooleanConfig(configPath, args[0], verifyTLSConfig)
if err != nil {
cmd.SilenceUsage = true
return err
}
}
@ -81,6 +87,7 @@ func NewImageCommand(searchService SearchService) *cobra.Command {
if err != nil {
cmd.SilenceUsage = true
return err
}

View File

@ -289,20 +289,20 @@ func TestServerResponse(t *testing.T) {
conf.Extensions = &extconf.ExtensionConfig{
Search: &extconf.SearchConfig{Enable: true},
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
c.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.RootDirectory = dir
go func(controller *api.Controller) {
// this blocks
if err := controller.Run(); err != nil {
return
}
}(c)
}(ctlr)
// wait till ready
for {
_, err := resty.R().Get(url)
@ -315,7 +315,7 @@ func TestServerResponse(t *testing.T) {
defer func(controller *api.Controller) {
ctx := context.Background()
_ = controller.Server.Shutdown(ctx)
}(c)
}(ctlr)
uploadManifest(url)
@ -470,7 +470,7 @@ func uploadManifest(url string) {
SetHeader("Content-Type", "application/octet-stream").SetBody(content).Put(loc)
// create a manifest
m := ispec.Manifest{
manifest := ispec.Manifest{
Config: ispec.Descriptor{
Digest: digest,
Size: int64(len(content)),
@ -483,15 +483,15 @@ func uploadManifest(url string) {
},
},
}
m.SchemaVersion = 2
content, _ = json.Marshal(m)
manifest.SchemaVersion = 2
content, _ = json.Marshal(manifest)
_, _ = resty.R().SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json").
SetBody(content).Put(url + "/v2/repo7/manifests/test:1.0")
content = []byte("this is a blob5")
digest = godigest.FromBytes(content)
// create a manifest with same blob but a different tag
m = ispec.Manifest{
manifest = ispec.Manifest{
Config: ispec.Descriptor{
Digest: digest,
Size: int64(len(content)),
@ -504,8 +504,8 @@ func uploadManifest(url string) {
},
},
}
m.SchemaVersion = 2
content, _ = json.Marshal(m)
manifest.SchemaVersion = 2
content, _ = json.Marshal(manifest)
_, _ = resty.R().SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json").
SetBody(content).Put(url + "/v2/repo7/manifests/test:2.0")
}
@ -513,8 +513,8 @@ func uploadManifest(url string) {
type mockService struct{}
func (service mockService) getAllImages(ctx context.Context, config searchConfig, username, password string,
channel chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
channel chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(channel)
image := &imageStruct{}
@ -530,14 +530,16 @@ func (service mockService) getAllImages(ctx context.Context, config searchConfig
str, err := image.string(*config.outputFormat)
if err != nil {
channel <- stringResult{"", err}
return
}
channel <- stringResult{str, nil}
}
func (service mockService) getImageByName(ctx context.Context, config searchConfig,
username, password, imageName string, channel chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
username, password, imageName string, channel chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(channel)
image := &imageStruct{}
@ -553,15 +555,17 @@ func (service mockService) getImageByName(ctx context.Context, config searchConf
str, err := image.string(*config.outputFormat)
if err != nil {
channel <- stringResult{"", err}
return
}
channel <- stringResult{str, nil}
}
func (service mockService) getCveByImage(ctx context.Context, config searchConfig, username, password,
imageName string, c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
defer close(c)
imageName string, rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(rch)
cveRes := &cveResult{}
cveRes.Data = cveData{
@ -587,43 +591,45 @@ func (service mockService) getCveByImage(ctx context.Context, config searchConfi
str, err := cveRes.string(*config.outputFormat)
if err != nil {
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
c <- stringResult{str, nil}
rch <- stringResult{str, nil}
}
func (service mockService) getImagesByCveID(ctx context.Context, config searchConfig, username, password, cveID string,
c chan stringResult, wg *sync.WaitGroup) {
service.getImageByName(ctx, config, username, password, "anImage", c, wg)
func (service mockService) getImagesByCveID(ctx context.Context, config searchConfig, username, password, cvid string,
rch chan stringResult, wtgrp *sync.WaitGroup) {
service.getImageByName(ctx, config, username, password, "anImage", rch, wtgrp)
}
func (service mockService) getImagesByDigest(ctx context.Context, config searchConfig, username,
password, digest string, c chan stringResult, wg *sync.WaitGroup) {
service.getImageByName(ctx, config, username, password, "anImage", c, wg)
password, digest string, rch chan stringResult, wtgrp *sync.WaitGroup) {
service.getImageByName(ctx, config, username, password, "anImage", rch, wtgrp)
}
func (service mockService) getImageByNameAndCVEID(ctx context.Context, config searchConfig, username,
password, imageName, cveID string, c chan stringResult, wg *sync.WaitGroup) {
service.getImageByName(ctx, config, username, password, imageName, c, wg)
password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) {
service.getImageByName(ctx, config, username, password, imageName, rch, wtgrp)
}
func (service mockService) getFixedTagsForCVE(ctx context.Context, config searchConfig,
username, password, imageName, cveID string, c chan stringResult, wg *sync.WaitGroup) {
service.getImageByName(ctx, config, username, password, imageName, c, wg)
username, password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) {
service.getImageByName(ctx, config, username, password, imageName, rch, wtgrp)
}
func makeConfigFile(content string) string {
os.Setenv("HOME", os.TempDir())
home, err := os.UserHomeDir()
home, err := os.UserHomeDir()
if err != nil {
panic(err)
}
configPath := path.Join(home + "/.zot")
if err := ioutil.WriteFile(configPath, []byte(content), 0600); err != nil {
if err := ioutil.WriteFile(configPath, []byte(content), 0o600); err != nil {
panic(err)
}

View File

@ -1,6 +1,7 @@
package cli
import (
"context"
"fmt"
"net/http"
@ -25,10 +26,7 @@ func metadataConfig(md *mapstructure.Metadata) viper.DecoderConfigOption {
}
}
func NewRootCmd() *cobra.Command {
showVersion := false
conf := config.New()
func newServeCmd(conf *config.Config) *cobra.Command {
// "serve"
serveCmd := &cobra.Command{
Use: "serve <config>",
@ -39,7 +37,8 @@ func NewRootCmd() *cobra.Command {
if len(args) > 0 {
LoadConfiguration(conf, args[0])
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
// creates a new file watcher
watcher, err := fsnotify.NewWatcher()
@ -60,7 +59,7 @@ func NewRootCmd() *cobra.Command {
log.Info().Msg("config file changed, trying to reload accessControl config")
newConfig := config.New()
LoadConfiguration(newConfig, args[0])
c.Config.AccessControl = newConfig.AccessControl
ctlr.Config.AccessControl = newConfig.AccessControl
}
// watch for errors
case err := <-watcher.Errors:
@ -77,12 +76,16 @@ func NewRootCmd() *cobra.Command {
<-done
}()
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
panic(err)
}
},
}
return serveCmd
}
func newScrubCmd(conf *config.Config) *cobra.Command {
// "scrub"
scrubCmd := &cobra.Command{
Use: "scrub <config>",
@ -90,33 +93,40 @@ func NewRootCmd() *cobra.Command {
Short: "`scrub` checks manifest/blob integrity",
Long: "`scrub` checks manifest/blob integrity",
Run: func(cmd *cobra.Command, args []string) {
configuration := config.New()
if len(args) > 0 {
LoadConfiguration(configuration, args[0])
LoadConfiguration(conf, args[0])
} else {
if err := cmd.Usage(); err != nil {
panic(err)
}
return
}
// checking if the server is already running
response, err := http.Get(fmt.Sprintf("http://%s:%s/v2", configuration.HTTP.Address, configuration.HTTP.Port))
req, err := http.NewRequestWithContext(context.Background(),
http.MethodGet,
fmt.Sprintf("http://%s:%s/v2", conf.HTTP.Address, conf.HTTP.Port),
nil)
if err != nil {
log.Error().Err(err).Msg("unable to create a new http request")
panic(err)
}
response, err := http.DefaultClient.Do(req)
if err == nil {
response.Body.Close()
log.Info().Msg("The server is running, in order to perform the scrub command the server should be shut down")
panic("Error: server is running")
} else {
// server is down
c := api.NewController(configuration)
ctlr := api.NewController(conf)
if err := c.InitImageStore(); err != nil {
if err := ctlr.InitImageStore(); err != nil {
panic(err)
}
result, err := c.StoreController.CheckAllBlobsIntegrity()
result, err := ctlr.StoreController.CheckAllBlobsIntegrity()
if err != nil {
panic(err)
}
@ -126,6 +136,11 @@ func NewRootCmd() *cobra.Command {
},
}
return scrubCmd
}
func newVerifyCmd(conf *config.Config) *cobra.Command {
// verify
verifyCmd := &cobra.Command{
Use: "verify <config>",
Aliases: []string{"verify"},
@ -133,40 +148,18 @@ func NewRootCmd() *cobra.Command {
Long: "`verify` validates a zot config file",
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 0 {
config := config.New()
LoadConfiguration(config, args[0])
LoadConfiguration(conf, args[0])
log.Info().Msgf("Config file %s is valid", args[0])
}
},
}
// "garbage-collect"
gcDelUntagged := false
gcDryRun := false
return verifyCmd
}
gcCmd := &cobra.Command{
Use: "garbage-collect <config>",
Aliases: []string{"gc"},
Short: "`garbage-collect` deletes layers not referenced by any manifests",
Long: "`garbage-collect` deletes layers not referenced by any manifests",
Run: func(cmd *cobra.Command, args []string) {
log.Info().Interface("values", conf).Msg("configuration settings")
if conf.Storage.RootDirectory != "" {
if err := storage.Scrub(conf.Storage.RootDirectory, gcDryRun); err != nil {
panic(err)
}
}
},
}
gcCmd.Flags().StringVarP(&conf.Storage.RootDirectory, "storage-root-dir", "r", "",
"Use specified directory for filestore backing image data")
_ = gcCmd.MarkFlagRequired("storage-root-dir")
gcCmd.Flags().BoolVarP(&gcDelUntagged, "delete-untagged", "m", false,
"delete manifests that are not currently referenced via tag")
gcCmd.Flags().BoolVarP(&gcDryRun, "dry-run", "d", false,
"do everything except remove the blobs")
func NewRootCmd() *cobra.Command {
showVersion := false
conf := config.New()
rootCmd := &cobra.Command{
Use: "zot",
@ -182,10 +175,9 @@ func NewRootCmd() *cobra.Command {
},
}
rootCmd.AddCommand(serveCmd)
rootCmd.AddCommand(scrubCmd)
rootCmd.AddCommand(gcCmd)
rootCmd.AddCommand(verifyCmd)
rootCmd.AddCommand(newServeCmd(conf))
rootCmd.AddCommand(newScrubCmd(conf))
rootCmd.AddCommand(newVerifyCmd(conf))
enableCli(rootCmd)
@ -202,13 +194,13 @@ func LoadConfiguration(config *config.Config, configPath string) {
panic(err)
}
md := &mapstructure.Metadata{}
if err := viper.Unmarshal(&config, metadataConfig(md)); err != nil {
metaData := &mapstructure.Metadata{}
if err := viper.Unmarshal(&config, metadataConfig(metaData)); err != nil {
log.Error().Err(err).Msg("error while unmarshalling new config")
panic(err)
}
if len(md.Keys) == 0 || len(md.Unused) > 0 {
if len(metaData.Keys) == 0 || len(metaData.Unused) > 0 {
log.Error().Err(errors.ErrBadConfig).Msg("bad configuration, retry writing it")
panic(errors.ErrBadConfig)
}

View File

@ -6,14 +6,12 @@ import (
"io/ioutil"
"os"
"path"
"gopkg.in/resty.v1"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
"github.com/spf13/viper"
"gopkg.in/resty.v1"
"zotregistry.io/zot/pkg/api"
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/cli"
@ -217,18 +215,6 @@ func TestLoadConfig(t *testing.T) {
})
}
func TestGC(t *testing.T) {
oldArgs := os.Args
defer func() { os.Args = oldArgs }()
Convey("Test gc", t, func(c C) {
os.Args = []string{"cli_test", "garbage-collect", "-h"}
err := cli.NewRootCmd().Execute()
So(err, ShouldBeNil)
})
}
func TestScrub(t *testing.T) {
oldArgs := os.Args
@ -365,20 +351,20 @@ func TestScrub(t *testing.T) {
panic(err)
}
if err := os.MkdirAll(fmt.Sprintf("%s/blobs", repo), 0755); err != nil {
if err := os.MkdirAll(fmt.Sprintf("%s/blobs", repo), 0o755); err != nil {
panic(err)
}
if _, err = os.Stat(fmt.Sprintf("%s/oci-layout", repo)); err != nil {
content := []byte(`{"imageLayoutVersion": "1.0.0"}`)
if err = ioutil.WriteFile(fmt.Sprintf("%s/oci-layout", repo), content, 0600); err != nil {
if err = ioutil.WriteFile(fmt.Sprintf("%s/oci-layout", repo), content, 0o600); err != nil {
panic(err)
}
}
if _, err = os.Stat(fmt.Sprintf("%s/index.json", repo)); err != nil {
content := []byte(`not a JSON content`)
if err = ioutil.WriteFile(fmt.Sprintf("%s/index.json", repo), content, 0600); err != nil {
if err = ioutil.WriteFile(fmt.Sprintf("%s/index.json", repo), content, 0o600); err != nil {
panic(err)
}
}

View File

@ -84,7 +84,7 @@ func (search allImagesSearcher) search(config searchConfig) (bool, error) {
go config.searchService.getAllImages(ctx, config, username, password, imageErr, &wg)
wg.Add(1)
var errCh chan error = make(chan error, 1)
errCh := make(chan error, 1)
go collectResults(config, &wg, imageErr, cancel, printImageTableHeader, errCh)
wg.Wait()
@ -115,7 +115,7 @@ func (search imageByNameSearcher) search(config searchConfig) (bool, error) {
*config.params["imageName"], imageErr, &wg)
wg.Add(1)
var errCh chan error = make(chan error, 1)
errCh := make(chan error, 1)
go collectResults(config, &wg, imageErr, cancel, printImageTableHeader, errCh)
wg.Wait()
@ -147,7 +147,7 @@ func (search imagesByDigestSearcher) search(config searchConfig) (bool, error) {
*config.params["digest"], imageErr, &wg)
wg.Add(1)
var errCh chan error = make(chan error, 1)
errCh := make(chan error, 1)
go collectResults(config, &wg, imageErr, cancel, printImageTableHeader, errCh)
wg.Wait()
@ -182,7 +182,7 @@ func (search cveByImageSearcher) search(config searchConfig) (bool, error) {
go config.searchService.getCveByImage(ctx, config, username, password, *config.params["imageName"], strErr, &wg)
wg.Add(1)
var errCh chan error = make(chan error, 1)
errCh := make(chan error, 1)
go collectResults(config, &wg, strErr, cancel, printCVETableHeader, errCh)
wg.Wait()
@ -198,7 +198,7 @@ func (search cveByImageSearcher) search(config searchConfig) (bool, error) {
type imagesByCVEIDSearcher struct{}
func (search imagesByCVEIDSearcher) search(config searchConfig) (bool, error) {
if !canSearch(config.params, newSet("cveID")) || *config.fixedFlag {
if !canSearch(config.params, newSet("cvid")) || *config.fixedFlag {
return false, nil
}
@ -210,10 +210,10 @@ func (search imagesByCVEIDSearcher) search(config searchConfig) (bool, error) {
wg.Add(1)
go config.searchService.getImagesByCveID(ctx, config, username, password, *config.params["cveID"], strErr, &wg)
go config.searchService.getImagesByCveID(ctx, config, username, password, *config.params["cvid"], strErr, &wg)
wg.Add(1)
var errCh chan error = make(chan error, 1)
errCh := make(chan error, 1)
go collectResults(config, &wg, strErr, cancel, printImageTableHeader, errCh)
wg.Wait()
@ -229,7 +229,7 @@ func (search imagesByCVEIDSearcher) search(config searchConfig) (bool, error) {
type tagsByImageNameAndCVEIDSearcher struct{}
func (search tagsByImageNameAndCVEIDSearcher) search(config searchConfig) (bool, error) {
if !canSearch(config.params, newSet("cveID", "imageName")) || *config.fixedFlag {
if !canSearch(config.params, newSet("cvid", "imageName")) || *config.fixedFlag {
return false, nil
}
@ -246,10 +246,10 @@ func (search tagsByImageNameAndCVEIDSearcher) search(config searchConfig) (bool,
wg.Add(1)
go config.searchService.getImageByNameAndCVEID(ctx, config, username, password, *config.params["imageName"],
*config.params["cveID"], strErr, &wg)
*config.params["cvid"], strErr, &wg)
wg.Add(1)
var errCh chan error = make(chan error, 1)
errCh := make(chan error, 1)
go collectResults(config, &wg, strErr, cancel, printImageTableHeader, errCh)
wg.Wait()
@ -265,7 +265,7 @@ func (search tagsByImageNameAndCVEIDSearcher) search(config searchConfig) (bool,
type fixedTagsSearcher struct{}
func (search fixedTagsSearcher) search(config searchConfig) (bool, error) {
if !canSearch(config.params, newSet("cveID", "imageName")) || !*config.fixedFlag {
if !canSearch(config.params, newSet("cvid", "imageName")) || !*config.fixedFlag {
return false, nil
}
@ -282,10 +282,10 @@ func (search fixedTagsSearcher) search(config searchConfig) (bool, error) {
wg.Add(1)
go config.searchService.getFixedTagsForCVE(ctx, config, username, password, *config.params["imageName"],
*config.params["cveID"], strErr, &wg)
*config.params["cvid"], strErr, &wg)
wg.Add(1)
var errCh chan error = make(chan error, 1)
errCh := make(chan error, 1)
go collectResults(config, &wg, strErr, cancel, printImageTableHeader, errCh)
wg.Wait()
@ -312,6 +312,7 @@ func collectResults(config searchConfig, wg *sync.WaitGroup, imageErr chan strin
if !ok {
cancel()
return
}
@ -346,6 +347,7 @@ func collectResults(config searchConfig, wg *sync.WaitGroup, imageErr chan strin
func getUsernameAndPassword(user string) (string, string) {
if strings.Contains(user, ":") {
split := strings.Split(user, ":")
return split[0], split[1]
}
@ -394,18 +396,19 @@ func getEmptyStruct() struct{} {
}
func newSet(initialValues ...string) *set {
s := &set{}
s.m = make(map[string]struct{})
ret := &set{}
ret.m = make(map[string]struct{})
for _, val := range initialValues {
s.m[val] = getEmptyStruct()
ret.m[val] = getEmptyStruct()
}
return s
return ret
}
func (s *set) contains(value string) bool {
_, c := s.m[value]
return c
}
@ -434,7 +437,7 @@ func printImageTableHeader(writer io.Writer, verbose bool) {
table.SetColMinWidth(colLayersIndex, layersWidth)
}
row := make([]string, 6)
row := make([]string, 6) //nolint:gomnd
row[colImageNameIndex] = "IMAGE NAME"
row[colTagIndex] = "TAG"
@ -452,7 +455,7 @@ func printImageTableHeader(writer io.Writer, verbose bool) {
func printCVETableHeader(writer io.Writer, verbose bool) {
table := getCVETableWriter(writer)
row := make([]string, 3)
row := make([]string, 3) //nolint:gomnd
row[colCVEIDIndex] = "ID"
row[colCVESeverityIndex] = "SEVERITY"
row[colCVETitleIndex] = "TITLE"

View File

@ -17,25 +17,24 @@ import (
jsoniter "github.com/json-iterator/go"
"github.com/olekukonko/tablewriter"
"gopkg.in/yaml.v2"
zotErrors "zotregistry.io/zot/errors"
)
type SearchService interface {
getAllImages(ctx context.Context, config searchConfig, username, password string,
channel chan stringResult, wg *sync.WaitGroup)
channel chan stringResult, wtgrp *sync.WaitGroup)
getImageByName(ctx context.Context, config searchConfig, username, password, imageName string,
channel chan stringResult, wg *sync.WaitGroup)
channel chan stringResult, wtgrp *sync.WaitGroup)
getCveByImage(ctx context.Context, config searchConfig, username, password, imageName string,
channel chan stringResult, wg *sync.WaitGroup)
getImagesByCveID(ctx context.Context, config searchConfig, username, password, cveID string,
channel chan stringResult, wg *sync.WaitGroup)
channel chan stringResult, wtgrp *sync.WaitGroup)
getImagesByCveID(ctx context.Context, config searchConfig, username, password, cvid string,
channel chan stringResult, wtgrp *sync.WaitGroup)
getImagesByDigest(ctx context.Context, config searchConfig, username, password, digest string,
channel chan stringResult, wg *sync.WaitGroup)
getImageByNameAndCVEID(ctx context.Context, config searchConfig, username, password, imageName, cveID string,
channel chan stringResult, wg *sync.WaitGroup)
getFixedTagsForCVE(ctx context.Context, config searchConfig, username, password, imageName, cveID string,
channel chan stringResult, wg *sync.WaitGroup)
channel chan stringResult, wtgrp *sync.WaitGroup)
getImageByNameAndCVEID(ctx context.Context, config searchConfig, username, password, imageName, cvid string,
channel chan stringResult, wtgrp *sync.WaitGroup)
getFixedTagsForCVE(ctx context.Context, config searchConfig, username, password, imageName, cvid string,
channel chan stringResult, wtgrp *sync.WaitGroup)
}
type searchService struct{}
@ -45,27 +44,27 @@ func NewSearchService() SearchService {
}
func (service searchService) getImageByName(ctx context.Context, config searchConfig,
username, password, imageName string, c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
defer close(c)
username, password, imageName string, rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(rch)
var localWg sync.WaitGroup
p := newSmoothRateLimiter(ctx, &localWg, c)
rlim := newSmoothRateLimiter(ctx, &localWg, rch)
localWg.Add(1)
go p.startRateLimiter()
go rlim.startRateLimiter(ctx)
localWg.Add(1)
go getImage(ctx, config, username, password, imageName, c, &localWg, p)
go getImage(ctx, config, username, password, imageName, rch, &localWg, rlim)
localWg.Wait()
}
func (service searchService) getAllImages(ctx context.Context, config searchConfig, username, password string,
c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
defer close(c)
rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(rch)
catalog := &catalogResponse{}
@ -74,89 +73,88 @@ func (service searchService) getAllImages(ctx context.Context, config searchConf
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
_, err = makeGETRequest(catalogEndPoint, username, password, *config.verifyTLS, catalog)
_, err = makeGETRequest(ctx, catalogEndPoint, username, password, *config.verifyTLS, catalog)
if err != nil {
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
var localWg sync.WaitGroup
p := newSmoothRateLimiter(ctx, &localWg, c)
rlim := newSmoothRateLimiter(ctx, &localWg, rch)
localWg.Add(1)
go p.startRateLimiter()
go rlim.startRateLimiter(ctx)
for _, repo := range catalog.Repositories {
localWg.Add(1)
go getImage(ctx, config, username, password, repo, c, &localWg, p)
go getImage(ctx, config, username, password, repo, rch, &localWg, rlim)
}
localWg.Wait()
}
func getImage(ctx context.Context, config searchConfig, username, password, imageName string,
c chan stringResult, wg *sync.WaitGroup, pool *requestsPool) {
defer wg.Done()
rch chan stringResult, wtgrp *sync.WaitGroup, pool *requestsPool) {
defer wtgrp.Done()
tagListEndpoint, err := combineServerAndEndpointURL(*config.servURL, fmt.Sprintf("/v2/%s/tags/list", imageName))
if err != nil {
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
tagsList := &tagListResp{}
_, err = makeGETRequest(tagListEndpoint, username, password, *config.verifyTLS, &tagsList)
_, err = makeGETRequest(ctx, tagListEndpoint, username, password, *config.verifyTLS, &tagsList)
if err != nil {
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
for _, tag := range tagsList.Tags {
wg.Add(1)
wtgrp.Add(1)
go addManifestCallToPool(ctx, config, pool, username, password, imageName, tag, c, wg)
go addManifestCallToPool(ctx, config, pool, username, password, imageName, tag, rch, wtgrp)
}
}
func (service searchService) getImagesByCveID(ctx context.Context, config searchConfig, username,
password, cveID string, c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
defer close(c)
password, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(rch)
query := fmt.Sprintf(`{ImageListForCVE(id: "%s") {`+`
Name Tags }
}`,
cveID)
cvid)
result := &imagesForCve{}
err := service.makeGraphQLQuery(config, username, password, query, result)
err := service.makeGraphQLQuery(ctx, config, username, password, query, result)
if err != nil {
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
@ -171,23 +169,23 @@ func (service searchService) getImagesByCveID(ctx context.Context, config search
if isContextDone(ctx) {
return
}
c <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
rch <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
return
}
var localWg sync.WaitGroup
p := newSmoothRateLimiter(ctx, &localWg, c)
rlim := newSmoothRateLimiter(ctx, &localWg, rch)
localWg.Add(1)
go p.startRateLimiter()
go rlim.startRateLimiter(ctx)
for _, image := range result.Data.ImageListForCVE {
for _, tag := range image.Tags {
localWg.Add(1)
go addManifestCallToPool(ctx, config, p, username, password, image.Name, tag, c, &localWg)
go addManifestCallToPool(ctx, config, rlim, username, password, image.Name, tag, rch, &localWg)
}
}
@ -195,9 +193,9 @@ func (service searchService) getImagesByCveID(ctx context.Context, config search
}
func (service searchService) getImagesByDigest(ctx context.Context, config searchConfig, username,
password string, digest string, c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
defer close(c)
password string, digest string, rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(rch)
query := fmt.Sprintf(`{ImageListForDigest(id: "%s") {`+`
Name Tags }
@ -205,13 +203,12 @@ func (service searchService) getImagesByDigest(ctx context.Context, config searc
digest)
result := &imagesForDigest{}
err := service.makeGraphQLQuery(config, username, password, query, result)
err := service.makeGraphQLQuery(ctx, config, username, password, query, result)
if err != nil {
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
@ -226,23 +223,23 @@ func (service searchService) getImagesByDigest(ctx context.Context, config searc
if isContextDone(ctx) {
return
}
c <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
rch <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
return
}
var localWg sync.WaitGroup
p := newSmoothRateLimiter(ctx, &localWg, c)
rlim := newSmoothRateLimiter(ctx, &localWg, rch)
localWg.Add(1)
go p.startRateLimiter()
go rlim.startRateLimiter(ctx)
for _, image := range result.Data.ImageListForDigest {
for _, tag := range image.Tags {
localWg.Add(1)
go addManifestCallToPool(ctx, config, p, username, password, image.Name, tag, c, &localWg)
go addManifestCallToPool(ctx, config, rlim, username, password, image.Name, tag, rch, &localWg)
}
}
@ -250,23 +247,22 @@ func (service searchService) getImagesByDigest(ctx context.Context, config searc
}
func (service searchService) getImageByNameAndCVEID(ctx context.Context, config searchConfig, username,
password, imageName, cveID string, c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
defer close(c)
password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(rch)
query := fmt.Sprintf(`{ImageListForCVE(id: "%s") {`+`
Name Tags }
}`,
cveID)
cvid)
result := &imagesForCve{}
err := service.makeGraphQLQuery(config, username, password, query, result)
err := service.makeGraphQLQuery(ctx, config, username, password, query, result)
if err != nil {
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
@ -281,17 +277,17 @@ func (service searchService) getImageByNameAndCVEID(ctx context.Context, config
if isContextDone(ctx) {
return
}
c <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
rch <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
return
}
var localWg sync.WaitGroup
p := newSmoothRateLimiter(ctx, &localWg, c)
rlim := newSmoothRateLimiter(ctx, &localWg, rch)
localWg.Add(1)
go p.startRateLimiter()
go rlim.startRateLimiter(ctx)
for _, image := range result.Data.ImageListForCVE {
if !strings.EqualFold(imageName, image.Name) {
@ -301,7 +297,7 @@ func (service searchService) getImageByNameAndCVEID(ctx context.Context, config
for _, tag := range image.Tags {
localWg.Add(1)
go addManifestCallToPool(ctx, config, p, username, password, image.Name, tag, c, &localWg)
go addManifestCallToPool(ctx, config, rlim, username, password, image.Name, tag, rch, &localWg)
}
}
@ -309,22 +305,21 @@ func (service searchService) getImageByNameAndCVEID(ctx context.Context, config
}
func (service searchService) getCveByImage(ctx context.Context, config searchConfig, username, password,
imageName string, c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
defer close(c)
imageName string, rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(rch)
query := fmt.Sprintf(`{ CVEListForImage (image:"%s")`+
` { Tag CVEList { Id Title Severity Description `+
`PackageList {Name InstalledVersion FixedVersion}} } }`, imageName)
result := &cveResult{}
err := service.makeGraphQLQuery(config, username, password, query, result)
err := service.makeGraphQLQuery(ctx, config, username, password, query, result)
if err != nil {
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
@ -339,7 +334,7 @@ func (service searchService) getCveByImage(ctx context.Context, config searchCon
if isContextDone(ctx) {
return
}
c <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
rch <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
return
}
@ -351,7 +346,7 @@ func (service searchService) getCveByImage(ctx context.Context, config searchCon
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
@ -359,7 +354,7 @@ func (service searchService) getCveByImage(ctx context.Context, config searchCon
if isContextDone(ctx) {
return
}
c <- stringResult{str, nil}
rch <- stringResult{str, nil}
}
func groupCVEsBySeverity(cveList []cve) []cve {
@ -393,23 +388,22 @@ func isContextDone(ctx context.Context) bool {
}
func (service searchService) getFixedTagsForCVE(ctx context.Context, config searchConfig,
username, password, imageName, cveID string, c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
defer close(c)
username, password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
defer close(rch)
query := fmt.Sprintf(`{ImageListWithCVEFixed (id: "%s", image: "%s") {`+`
Tags {Name Timestamp} }
}`,
cveID, imageName)
cvid, imageName)
result := &fixedTags{}
err := service.makeGraphQLQuery(config, username, password, query, result)
err := service.makeGraphQLQuery(ctx, config, username, password, query, result)
if err != nil {
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
return
}
@ -424,22 +418,22 @@ func (service searchService) getFixedTagsForCVE(ctx context.Context, config sear
if isContextDone(ctx) {
return
}
c <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
rch <- stringResult{"", errors.New(errBuilder.String())} //nolint: goerr113
return
}
var localWg sync.WaitGroup
p := newSmoothRateLimiter(ctx, &localWg, c)
rlim := newSmoothRateLimiter(ctx, &localWg, rch)
localWg.Add(1)
go p.startRateLimiter()
go rlim.startRateLimiter(ctx)
for _, imgTag := range result.Data.ImageListWithCVEFixed.Tags {
localWg.Add(1)
go addManifestCallToPool(ctx, config, p, username, password, imageName, imgTag.Name, c, &localWg)
go addManifestCallToPool(ctx, config, rlim, username, password, imageName, imgTag.Name, rch, &localWg)
}
localWg.Wait()
@ -447,14 +441,15 @@ func (service searchService) getFixedTagsForCVE(ctx context.Context, config sear
// Query using JQL, the query string is passed as a parameter
// errors are returned in the stringResult channel, the unmarshalled payload is in resultPtr.
func (service searchService) makeGraphQLQuery(config searchConfig, username, password, query string,
func (service searchService) makeGraphQLQuery(ctx context.Context, config searchConfig,
username, password, query string,
resultPtr interface{}) error {
endPoint, err := combineServerAndEndpointURL(*config.servURL, "/query")
if err != nil {
return err
}
err = makeGraphQLRequest(endPoint, query, username, password, *config.verifyTLS, resultPtr)
err = makeGraphQLRequest(ctx, endPoint, query, username, password, *config.verifyTLS, resultPtr)
if err != nil {
return err
}
@ -462,9 +457,9 @@ func (service searchService) makeGraphQLQuery(config searchConfig, username, pas
return nil
}
func addManifestCallToPool(ctx context.Context, config searchConfig, p *requestsPool, username, password, imageName,
tagName string, c chan stringResult, wg *sync.WaitGroup) {
defer wg.Done()
func addManifestCallToPool(ctx context.Context, config searchConfig, pool *requestsPool,
username, password, imageName, tagName string, rch chan stringResult, wtgrp *sync.WaitGroup) {
defer wtgrp.Done()
resultManifest := manifestResponse{}
@ -474,7 +469,7 @@ func addManifestCallToPool(ctx context.Context, config searchConfig, p *requests
if isContextDone(ctx) {
return
}
c <- stringResult{"", err}
rch <- stringResult{"", err}
}
job := manifestJob{
@ -487,23 +482,28 @@ func addManifestCallToPool(ctx context.Context, config searchConfig, p *requests
config: config,
}
wg.Add(1)
p.submitJob(&job)
wtgrp.Add(1)
pool.submitJob(&job)
}
type cveResult struct {
Errors []errorGraphQL `json:"errors"`
Data cveData `json:"data"`
}
type errorGraphQL struct {
Message string `json:"message"`
Path []string `json:"path"`
}
//nolint:tagliatelle // graphQL schema
type packageList struct {
Name string `json:"Name"`
InstalledVersion string `json:"InstalledVersion"`
FixedVersion string `json:"FixedVersion"`
}
//nolint:tagliatelle // graphQL schema
type cve struct {
ID string `json:"Id"`
Severity string `json:"Severity"`
@ -511,10 +511,14 @@ type cve struct {
Description string `json:"Description"`
PackageList []packageList `json:"PackageList"`
}
//nolint:tagliatelle // graphQL schema
type cveListForImage struct {
Tag string `json:"Tag"`
CVEList []cve `json:"CVEList"`
}
//nolint:tagliatelle // graphQL schema
type cveData struct {
CVEListForImage cveListForImage `json:"CVEListForImage"`
}
@ -538,10 +542,10 @@ func (cve cveResult) stringPlainText() (string, error) {
table := getCVETableWriter(&builder)
for _, c := range cve.Data.CVEListForImage.CVEList {
id := ellipsize(c.ID, cveIDWidth, ellipsis)
id := ellipsize(c.ID, cvidWidth, ellipsis)
title := ellipsize(c.Title, cveTitleWidth, ellipsis)
severity := ellipsize(c.Severity, cveSeverityWidth, ellipsis)
row := make([]string, 3)
row := make([]string, 3) //nolint:gomnd
row[colCVEIDIndex] = id
row[colCVESeverityIndex] = severity
row[colCVETitleIndex] = title
@ -555,9 +559,9 @@ func (cve cveResult) stringPlainText() (string, error) {
}
func (cve cveResult) stringJSON() (string, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
body, err := json.MarshalIndent(cve.Data.CVEListForImage, "", " ")
json := jsoniter.ConfigCompatibleWithStandardLibrary
body, err := json.MarshalIndent(cve.Data.CVEListForImage, "", " ")
if err != nil {
return "", err
}
@ -567,7 +571,6 @@ func (cve cveResult) stringJSON() (string, error) {
func (cve cveResult) stringYAML() (string, error) {
body, err := yaml.Marshal(&cve.Data.CVEListForImage)
if err != nil {
return "", err
}
@ -578,6 +581,7 @@ func (cve cveResult) stringYAML() (string, error) {
type fixedTags struct {
Errors []errorGraphQL `json:"errors"`
Data struct {
//nolint:tagliatelle // graphQL schema
ImageListWithCVEFixed struct {
Tags []struct {
Name string `json:"Name"`
@ -590,14 +594,14 @@ type fixedTags struct {
type imagesForCve struct {
Errors []errorGraphQL `json:"errors"`
Data struct {
ImageListForCVE []tagListResp `json:"ImageListForCVE"`
ImageListForCVE []tagListResp `json:"ImageListForCVE"` //nolint:tagliatelle // graphQL schema
} `json:"data"`
}
type imagesForDigest struct {
Errors []errorGraphQL `json:"errors"`
Data struct {
ImageListForDigest []tagListResp `json:"ImageListForDigest"`
ImageListForDigest []tagListResp `json:"ImageListForDigest"` //nolint:tagliatelle // graphQL schema
} `json:"data"`
}
@ -658,7 +662,7 @@ func (img imageStruct) stringPlainText() (string, error) {
digest := ellipsize(tag.Digest, digestWidth, "")
size := ellipsize(strings.ReplaceAll(humanize.Bytes(tag.Size), " ", ""), sizeWidth, ellipsis)
config := ellipsize(tag.ConfigDigest, configWidth, "")
row := make([]string, 6)
row := make([]string, 6) //nolint:gomnd
row[colImageNameIndex] = imageName
row[colTagIndex] = tagName
@ -677,7 +681,7 @@ func (img imageStruct) stringPlainText() (string, error) {
layerSize := ellipsize(strings.ReplaceAll(humanize.Bytes(entry.Size), " ", ""), sizeWidth, ellipsis)
layerDigest := ellipsize(entry.Digest, digestWidth, "")
layerRow := make([]string, 6)
layerRow := make([]string, 6) //nolint:gomnd
layerRow[colImageNameIndex] = ""
layerRow[colTagIndex] = ""
layerRow[colDigestIndex] = ""
@ -696,9 +700,9 @@ func (img imageStruct) stringPlainText() (string, error) {
}
func (img imageStruct) stringJSON() (string, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
body, err := json.MarshalIndent(img, "", " ")
json := jsoniter.ConfigCompatibleWithStandardLibrary
body, err := json.MarshalIndent(img, "", " ")
if err != nil {
return "", err
}
@ -708,7 +712,6 @@ func (img imageStruct) stringJSON() (string, error) {
func (img imageStruct) stringYAML() (string, error) {
body, err := yaml.Marshal(&img)
if err != nil {
return "", err
}
@ -727,8 +730,8 @@ type manifestResponse struct {
Size uint64 `json:"size"`
} `json:"layers"`
Annotations struct {
WsTychoStackerStackerYaml string `json:"ws.tycho.stacker.stacker_yaml"`
WsTychoStackerGitVersion string `json:"ws.tycho.stacker.git_version"`
WsTychoStackerStackerYaml string `json:"ws.tycho.stacker.stacker_yaml"` //nolint:tagliatelle // custom annotation
WsTychoStackerGitVersion string `json:"ws.tycho.stacker.git_version"` //nolint:tagliatelle // custom annotation
} `json:"annotations"`
Config struct {
Size int `json:"size"`
@ -744,7 +747,6 @@ func combineServerAndEndpointURL(serverURL, endPoint string) (string, error) {
}
newURL, err := url.Parse(serverURL)
if err != nil {
return "", zotErrors.ErrInvalidURL
}
@ -797,7 +799,7 @@ func getCVETableWriter(writer io.Writer) *tablewriter.Table {
table.SetBorder(false)
table.SetTablePadding(" ")
table.SetNoWhiteSpace(true)
table.SetColMinWidth(colCVEIDIndex, cveIDWidth)
table.SetColMinWidth(colCVEIDIndex, cvidWidth)
table.SetColMinWidth(colCVESeverityIndex, cveSeverityWidth)
table.SetColMinWidth(colCVETitleIndex, cveTitleWidth)
@ -820,7 +822,7 @@ const (
colLayersIndex = 4
colSizeIndex = 5
cveIDWidth = 16
cvidWidth = 16
cveSeverityWidth = 8
cveTitleWidth = 48

File diff suppressed because it is too large Load Diff

View File

@ -14,6 +14,7 @@ type Controller struct {
func NewController(cfg *Config) *Controller {
logger := log.NewLogger(cfg.Exporter.Log.Level, cfg.Exporter.Log.Output)
return &Controller{Config: cfg, Log: logger}
}

View File

@ -5,10 +5,11 @@ package api_test
import (
"context"
"crypto/rand"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"math/big"
"net/http"
"os"
"strings"
@ -33,8 +34,12 @@ const (
)
func getRandomLatencyN(maxNanoSeconds int64) time.Duration {
rand.Seed(time.Now().UnixNano())
return time.Duration(rand.Int63n(maxNanoSeconds))
nBig, err := rand.Int(rand.Reader, big.NewInt(maxNanoSeconds))
if err != nil {
panic(err)
}
return time.Duration(nBig.Int64())
}
func getRandomLatency() time.Duration {
@ -59,20 +64,20 @@ func isChannelDrained(ch chan prometheus.Metric) bool {
}
}
func readDefaultMetrics(zc *api.Collector, ch chan prometheus.Metric) {
func readDefaultMetrics(collector *api.Collector, chMetric chan prometheus.Metric) {
var metric dto.Metric
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_up"].String())
pmMetric := <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_up"].String())
err := pm.Write(&metric)
err := pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Gauge.Value, ShouldEqual, 1)
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_info"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_info"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Gauge.Value, ShouldEqual, 0)
}
@ -97,17 +102,17 @@ func TestNewExporter(t *testing.T) {
}()
time.Sleep(SleepTime)
zc := api.GetCollector(exporterController)
ch := make(chan prometheus.Metric)
collector := api.GetCollector(exporterController)
chMetric := make(chan prometheus.Metric)
Convey("When zot server not running", func() {
go func() {
// this blocks
zc.Collect(ch)
collector.Collect(chMetric)
}()
// Read from the channel expected values
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_up"].String())
pm := <-chMetric
So(pm.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_up"].String())
var metric dto.Metric
err := pm.Write(&metric)
@ -115,7 +120,7 @@ func TestNewExporter(t *testing.T) {
So(*metric.Gauge.Value, ShouldEqual, 0) // "zot_up=0" means zot server is not running
// Check that no more data was written to the channel
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
})
Convey("When zot server is running", func() {
servercConfig := zotcfg.New()
@ -156,54 +161,58 @@ func TestNewExporter(t *testing.T) {
Convey("Collecting data: default metrics", func() {
go func() {
// this blocks
zc.Collect(ch)
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
So(isChannelDrained(ch), ShouldEqual, true)
readDefaultMetrics(collector, chMetric)
So(isChannelDrained(chMetric), ShouldEqual, true)
})
Convey("Collecting data: Test init value & that increment works on Counters", func() {
//Testing initial value of the counter to be 1 after first incrementation call
// Testing initial value of the counter to be 1 after first incrementation call
monitoring.IncUploadCounter(serverController.Metrics, "testrepo")
time.Sleep(SleepTime)
go func() {
// this blocks
zc.Collect(ch)
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
readDefaultMetrics(collector, chMetric)
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_repo_uploads_total"].String())
pmMetric := <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_uploads_total"].String())
var metric dto.Metric
err := pm.Write(&metric)
err := pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, 1)
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
//Testing that counter is incremented by 1
// Testing that counter is incremented by 1
monitoring.IncUploadCounter(serverController.Metrics, "testrepo")
time.Sleep(SleepTime)
go func() {
// this blocks
zc.Collect(ch)
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
readDefaultMetrics(collector, chMetric)
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_repo_uploads_total"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_uploads_total"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, 2)
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
})
Convey("Collecting data: Test that concurent Counter increment requests works properly", func() {
reqsSize := rand.Intn(1000)
nBig, err := rand.Int(rand.Reader, big.NewInt(1000))
if err != nil {
panic(err)
}
reqsSize := int(nBig.Int64())
for i := 0; i < reqsSize; i++ {
monitoring.IncDownloadCounter(serverController.Metrics, "dummyrepo")
}
@ -211,79 +220,83 @@ func TestNewExporter(t *testing.T) {
go func() {
// this blocks
zc.Collect(ch)
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_repo_downloads_total"].String())
readDefaultMetrics(collector, chMetric)
pm := <-chMetric
So(pm.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_downloads_total"].String())
var metric dto.Metric
err := pm.Write(&metric)
err = pm.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, reqsSize)
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
})
Convey("Collecting data: Test init value & that observe works on Summaries", func() {
//Testing initial value of the summary counter to be 1 after first observation call
// Testing initial value of the summary counter to be 1 after first observation call
var latency1, latency2 time.Duration
latency1 = getRandomLatency()
monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/testrepo/blogs/dummydigest", latency1)
time.Sleep(SleepTime)
go func() {
//this blocks
zc.Collect(ch)
// this blocks
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
readDefaultMetrics(collector, chMetric)
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_count"].String())
pmMetric := <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())
var metric dto.Metric
err := pm.Write(&metric)
err := pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, 1)
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, latency1.Seconds())
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
//Testing that summary counter is incremented by 1 and summary sum is properly updated
// Testing that summary counter is incremented by 1 and summary sum is properly updated
latency2 = getRandomLatency()
monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/testrepo/blogs/dummydigest", latency2)
time.Sleep(SleepTime)
go func() {
// this blocks
zc.Collect(ch)
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
readDefaultMetrics(collector, chMetric)
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_count"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, 2)
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, (latency1.Seconds())+(latency2.Seconds()))
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
})
Convey("Collecting data: Test that concurent Summary observation requests works properly", func() {
var latencySum float64
reqsSize := rand.Intn(1000)
nBig, err := rand.Int(rand.Reader, big.NewInt(1000))
if err != nil {
panic(err)
}
reqsSize := int(nBig.Int64())
for i := 0; i < reqsSize; i++ {
latency := getRandomLatency()
latencySum += latency.Seconds()
@ -293,59 +306,60 @@ func TestNewExporter(t *testing.T) {
go func() {
// this blocks
zc.Collect(ch)
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
readDefaultMetrics(collector, chMetric)
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_count"].String())
pmMetric := <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())
var metric dto.Metric
err := pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, reqsSize)
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, latencySum)
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
})
Convey("Collecting data: Test init value & that observe works on Histogram buckets", func() {
//Testing initial value of the histogram counter to be 1 after first observation call
// Testing initial value of the histogram counter to be 1 after first observation call
latency := getRandomLatency()
monitoring.ObserveHTTPMethodLatency(serverController.Metrics, "GET", latency)
time.Sleep(SleepTime)
go func() {
//this blocks
zc.Collect(ch)
// this blocks
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
readDefaultMetrics(collector, chMetric)
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_count"].String())
pmMetric := <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_count"].String())
var metric dto.Metric
err := pm.Write(&metric)
err := pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, 1)
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_sum"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_sum"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, latency.Seconds())
for _, fvalue := range monitoring.GetDefaultBuckets() {
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual,
collector.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
if latency.Seconds() < fvalue {
So(*metric.Counter.Value, ShouldEqual, 1)
@ -354,21 +368,21 @@ func TestNewExporter(t *testing.T) {
}
}
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
})
Convey("Collecting data: Test init Histogram buckets \n", func() {
//Generate a random latency within each bucket and finally test
// Generate a random latency within each bucket and finally test
// that "higher" rank bucket counter is incremented by 1
var latencySum float64
dBuckets := monitoring.GetDefaultBuckets()
for i, fvalue := range dBuckets {
for index, fvalue := range dBuckets {
var latency time.Duration
if i == 0 {
//first bucket value
if index == 0 {
// first bucket value
latency = getRandomLatencyN(int64(fvalue * SecondToNanoseconds))
} else {
pvalue := dBuckets[i-1] // previous bucket value
pvalue := dBuckets[index-1] // previous bucket value
latency = time.Duration(pvalue*SecondToNanoseconds) +
getRandomLatencyN(int64(dBuckets[0]*SecondToNanoseconds))
}
@ -378,36 +392,38 @@ func TestNewExporter(t *testing.T) {
time.Sleep(SleepTime)
go func() {
//this blocks
zc.Collect(ch)
// this blocks
collector.Collect(chMetric)
}()
readDefaultMetrics(zc, ch)
readDefaultMetrics(collector, chMetric)
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_count"].String())
pmMetric := <-chMetric
So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_count"].String())
var metric dto.Metric
err := pm.Write(&metric)
err := pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, len(dBuckets))
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_sum"].String())
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual,
collector.MetricsDesc["zot_http_method_latency_seconds_sum"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, latencySum)
for i := range dBuckets {
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())
for index := range dBuckets {
pmMetric = <-chMetric
So(pmMetric.Desc().String(), ShouldEqual,
collector.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())
err = pm.Write(&metric)
err = pmMetric.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Counter.Value, ShouldEqual, i+1)
So(*metric.Counter.Value, ShouldEqual, index+1)
}
So(isChannelDrained(ch), ShouldEqual, true)
So(isChannelDrained(chMetric), ShouldEqual, true)
})
Convey("Negative testing: Send unknown metric type to MetricServer", func() {
serverController.Metrics.SendMetric(getRandomLatency())
@ -415,13 +431,17 @@ func TestNewExporter(t *testing.T) {
Convey("Concurrent metrics scrape", func() {
var wg sync.WaitGroup
workersSize := rand.Intn(100)
nBig, err := rand.Int(rand.Reader, big.NewInt(100))
if err != nil {
panic(err)
}
workersSize := int(nBig.Int64())
for i := 0; i < workersSize; i++ {
wg.Add(1)
go func() {
defer wg.Done()
m := serverController.Metrics.ReceiveMetrics()
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
_, err := json.Marshal(m)
if err != nil {

View File

@ -1,6 +1,7 @@
//go:build minimal
// +build minimal
// nolint: varnamelen
package api
import (
@ -32,9 +33,8 @@ func (zc Collector) Describe(ch chan<- *prometheus.Desc) {
// Implements prometheus.Collector interface.
func (zc Collector) Collect(ch chan<- prometheus.Metric) {
metrics, err := zc.Client.GetMetrics()
if err != nil {
fmt.Println(err)
fmt.Printf("error getting metrics: %v\n", err)
ch <- prometheus.MustNewConstMetric(zc.MetricsDesc["zot_up"], prometheus.GaugeValue, 0)
return
@ -54,15 +54,15 @@ func (zc Collector) Collect(ch chan<- prometheus.Metric) {
zc.MetricsDesc[name], prometheus.CounterValue, float64(c.Count), c.LabelValues...)
}
for _, s := range metrics.Summaries {
mname := zc.invalidChars.ReplaceAllLiteralString(s.Name, "_")
for _, summary := range metrics.Summaries {
mname := zc.invalidChars.ReplaceAllLiteralString(summary.Name, "_")
name := mname + "_count"
ch <- prometheus.MustNewConstMetric(
zc.MetricsDesc[name], prometheus.CounterValue, float64(s.Count), s.LabelValues...)
zc.MetricsDesc[name], prometheus.CounterValue, float64(summary.Count), summary.LabelValues...)
name = mname + "_sum"
ch <- prometheus.MustNewConstMetric(
zc.MetricsDesc[name], prometheus.CounterValue, s.Sum, s.LabelValues...)
zc.MetricsDesc[name], prometheus.CounterValue, summary.Sum, summary.LabelValues...)
}
for _, h := range metrics.Histograms {
@ -99,7 +99,7 @@ func panicOnDuplicateMetricName(m map[string]*prometheus.Desc, name string, log
}
func GetCollector(c *Controller) *Collector {
//compute all metrics description map
// compute all metrics description map
MetricsDesc := map[string]*prometheus.Desc{
"zot_up": prometheus.NewDesc(
"zot_up",

View File

@ -4,13 +4,12 @@
package cli
import (
"zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/exporter/api"
"github.com/mitchellh/mapstructure"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/exporter/api"
)
// metadataConfig reports metadata after parsing, which we use to track
@ -64,13 +63,13 @@ func loadConfiguration(config *api.Config, configPath string) {
panic(err)
}
md := &mapstructure.Metadata{}
if err := viper.Unmarshal(&config, metadataConfig(md)); err != nil {
metaData := &mapstructure.Metadata{}
if err := viper.Unmarshal(&config, metadataConfig(metaData)); err != nil {
log.Error().Err(err).Msg("Error while unmarshalling new config")
panic(err)
}
if len(md.Keys) == 0 || len(md.Unused) > 0 {
if len(metaData.Keys) == 0 || len(metaData.Unused) > 0 {
log.Error().Err(errors.ErrBadConfig).Msg("Bad configuration, retry writing it")
panic(errors.ErrBadConfig)
}

View File

@ -42,5 +42,6 @@ func SyncOneImage(config *config.Config, storeController storage.StoreController
repoName, reference string, log log.Logger) error {
log.Warn().Msg("skipping syncing on demand because given zot binary doesn't support any extensions," +
"please build zot full binary for this feature")
return nil
}

View File

@ -1,6 +1,7 @@
package monitoring
import (
"fmt"
"math"
"os"
"path/filepath"
@ -28,8 +29,9 @@ func getDirSize(path string) (int64, error) {
if !info.IsDir() {
size += info.Size()
}
return err
})
return size, err
return size, fmt.Errorf("getDirSize: %w", err)
}

View File

@ -10,6 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/log"
)
@ -90,14 +91,26 @@ func NewMetricsServer(enabled bool, log log.Logger) MetricServer {
// implementing the MetricServer interface.
func (ms *metricServer) SendMetric(mfunc interface{}) {
if ms.enabled {
fn := mfunc.(func())
fn()
mfn, ok := mfunc.(func())
if !ok {
ms.log.Error().Err(errors.ErrInvalidMetric).Msg("type conversion")
return
}
mfn()
}
}
func (ms *metricServer) ForceSendMetric(mfunc interface{}) {
fn := mfunc.(func())
fn()
mfn, ok := mfunc.(func())
if !ok {
ms.log.Error().Err(errors.ErrInvalidMetric).Msg("type conversion")
return
}
mfn()
}
func (ms *metricServer) ReceiveMetrics() interface{} {

View File

@ -1,6 +1,7 @@
//go:build minimal
// +build minimal
// nolint: varnamelen,forcetypeassert
package monitoring
import (
@ -16,16 +17,16 @@ import (
const (
metricsNamespace = "zot"
// Counters
// Counters.
httpConnRequests = metricsNamespace + ".http.requests"
repoDownloads = metricsNamespace + ".repo.downloads"
repoUploads = metricsNamespace + ".repo.uploads"
//Gauge
// Gauge.
repoStorageBytes = metricsNamespace + ".repo.storage.bytes"
serverInfo = metricsNamespace + ".info"
//Summary
// Summary.
httpRepoLatencySeconds = metricsNamespace + ".http.repo.latency.seconds"
//Histogram
// Histogram.
httpMethodLatencySeconds = metricsNamespace + ".http.method.latency.seconds"
metricsScrapeTimeout = 2 * time.Minute
@ -109,6 +110,7 @@ func (ms *metricServer) ReceiveMetrics() interface{} {
func (ms *metricServer) IsEnabled() (b bool) {
// send a bool value on the request channel to avoid data race
ms.reqChan <- b
return (<-ms.reqChan).(bool)
}
@ -288,13 +290,14 @@ func findHistogramValueIndex(metricSlice []*HistogramValue, name string, labelVa
}
func (ms *metricServer) CounterInc(cv *CounterValue) {
kLabels, ok := GetCounters()[cv.Name] // known label names for the 'name' counter
err := sanityChecks(cv.Name, kLabels, ok, cv.LabelNames, cv.LabelValues)
labels, ok := GetCounters()[cv.Name] // known label names for the 'name' counter
err := sanityChecks(cv.Name, labels, ok, cv.LabelNames, cv.LabelValues)
if err != nil {
// The last thing we want is to panic/stop the server due to instrumentation
// thus log a message (should be detected during development of new metrics)
ms.log.Error().Err(err).Msg("Instrumentation error")
return
}
@ -309,11 +312,12 @@ func (ms *metricServer) CounterInc(cv *CounterValue) {
}
func (ms *metricServer) GaugeSet(gv *GaugeValue) {
kLabels, ok := GetGauges()[gv.Name] // known label names for the 'name' counter
err := sanityChecks(gv.Name, kLabels, ok, gv.LabelNames, gv.LabelValues)
labels, ok := GetGauges()[gv.Name] // known label names for the 'name' counter
err := sanityChecks(gv.Name, labels, ok, gv.LabelNames, gv.LabelValues)
if err != nil {
ms.log.Error().Err(err).Msg("Instrumentation error")
return
}
@ -327,11 +331,12 @@ func (ms *metricServer) GaugeSet(gv *GaugeValue) {
}
func (ms *metricServer) SummaryObserve(sv *SummaryValue) {
kLabels, ok := GetSummaries()[sv.Name] // known label names for the 'name' summary
err := sanityChecks(sv.Name, kLabels, ok, sv.LabelNames, sv.LabelValues)
labels, ok := GetSummaries()[sv.Name] // known label names for the 'name' summary
err := sanityChecks(sv.Name, labels, ok, sv.LabelNames, sv.LabelValues)
if err != nil {
ms.log.Error().Err(err).Msg("Instrumentation error")
return
}
@ -347,11 +352,12 @@ func (ms *metricServer) SummaryObserve(sv *SummaryValue) {
}
func (ms *metricServer) HistogramObserve(hv *HistogramValue) {
kLabels, ok := GetHistograms()[hv.Name] // known label names for the 'name' counter
err := sanityChecks(hv.Name, kLabels, ok, hv.LabelNames, hv.LabelValues)
labels, ok := GetHistograms()[hv.Name] // known label names for the 'name' counter
err := sanityChecks(hv.Name, labels, ok, hv.LabelNames, hv.LabelValues)
if err != nil {
ms.log.Error().Err(err).Msg("Instrumentation error")
return
}
@ -465,8 +471,8 @@ func IncUploadCounter(ms MetricServer, repo string) {
func SetStorageUsage(ms MetricServer, rootDir string, repo string) {
dir := path.Join(rootDir, repo)
repoSize, err := getDirSize(dir)
repoSize, err := getDirSize(dir)
if err != nil {
ms.(*metricServer).log.Error().Err(err).Msg("failed to set storage usage")
}

View File

@ -4,8 +4,10 @@
package monitoring
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"time"
@ -66,21 +68,20 @@ func (mc *MetricsClient) GetMetrics() (*MetricsInfo, error) {
}
func (mc *MetricsClient) makeGETRequest(url string, resultsPtr interface{}) (http.Header, error) {
req, err := http.NewRequest("GET", url, nil)
req, err := http.NewRequestWithContext(context.Background(), "GET", url, nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("metric scraping: %w", err)
}
resp, err := mc.config.HTTPClient.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("metric scraping error: %w", err)
}
defer resp.Body.Close()
if err := json.NewDecoder(resp.Body).Decode(resultsPtr); err != nil {
return nil, err
return nil, fmt.Errorf("metric scraping failed: %w", err)
}
return resp.Header, nil

View File

@ -56,8 +56,8 @@ func GetRootDir(image string, storeController storage.StoreController) string {
func GetRepo(image string) string {
if strings.Contains(image, ":") {
splitString := strings.SplitN(image, ":", 2)
if len(splitString) != 2 { //nolint: gomnd
splitString := strings.SplitN(image, ":", 2) //nolint:gomnd
if len(splitString) != 2 { //nolint:gomnd
return image
}
@ -100,9 +100,9 @@ func GetLatestTag(allTags []TagInfo) TagInfo {
}
func GetRoutePrefix(name string) string {
names := strings.SplitN(name, "/", 2)
names := strings.SplitN(name, "/", 2) //nolint:gomnd
if len(names) != 2 { // nolint: gomnd
if len(names) != 2 { // nolint:gomnd
// it means route is of global storage e.g "centos:latest"
if len(names) == 1 {
return "/"

View File

@ -36,6 +36,7 @@ type ImgResponsWithLatestTag struct {
Errors []ErrorGQL `json:"errors"`
}
//nolint:tagliatelle // graphQL schema
type ImgListWithLatestTag struct {
Images []ImageInfo `json:"ImageListWithLatestTag"`
}
@ -87,18 +88,26 @@ func testSetup() error {
func getTags() ([]common.TagInfo, []common.TagInfo) {
tags := make([]common.TagInfo, 0)
firstTag := common.TagInfo{Name: "1.0.0",
firstTag := common.TagInfo{
Name: "1.0.0",
Digest: "sha256:eca04f027f414362596f2632746d8a178362170b9ac9af772011fedcc3877ebb",
Timestamp: time.Now()}
secondTag := common.TagInfo{Name: "1.0.1",
Timestamp: time.Now(),
}
secondTag := common.TagInfo{
Name: "1.0.1",
Digest: "sha256:eca04f027f414362596f2632746d8a179362170b9ac9af772011fedcc3877ebb",
Timestamp: time.Now()}
thirdTag := common.TagInfo{Name: "1.0.2",
Timestamp: time.Now(),
}
thirdTag := common.TagInfo{
Name: "1.0.2",
Digest: "sha256:eca04f027f414362596f2632746d8a170362170b9ac9af772011fedcc3877ebb",
Timestamp: time.Now()}
fourthTag := common.TagInfo{Name: "1.0.3",
Timestamp: time.Now(),
}
fourthTag := common.TagInfo{
Name: "1.0.3",
Digest: "sha256:eca04f027f414362596f2632746d8a171362170b9ac9af772011fedcc3877ebb",
Timestamp: time.Now()}
Timestamp: time.Now(),
}
tags = append(tags, firstTag, secondTag, thirdTag, fourthTag)
@ -183,11 +192,11 @@ func TestLatestTagSearchHTTP(t *testing.T) {
conf.Extensions.Search.CVE = nil
c := api.NewController(conf)
ctlr := api.NewController(conf)
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -204,7 +213,7 @@ func TestLatestTagSearchHTTP(t *testing.T) {
// shut down server
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
resp, err := resty.R().Get(baseURL + "/v2/")
@ -234,7 +243,7 @@ func TestLatestTagSearchHTTP(t *testing.T) {
So(resp, ShouldNotBeNil)
So(err, ShouldBeNil)
err = os.Chmod(rootDir, 0000)
err = os.Chmod(rootDir, 0o000)
if err != nil {
panic(err)
}
@ -248,7 +257,7 @@ func TestLatestTagSearchHTTP(t *testing.T) {
So(err, ShouldBeNil)
So(len(responseStruct.ImgListWithLatestTag.Images), ShouldEqual, 0)
err = os.Chmod(rootDir, 0755)
err = os.Chmod(rootDir, 0o755)
if err != nil {
panic(err)
}

View File

@ -3,12 +3,11 @@ package common
import (
"encoding/json"
goerrors "errors"
"path"
"strings"
"time"
goerrors "errors"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
godigest "github.com/opencontainers/go-digest"
@ -32,8 +31,8 @@ func NewOciLayoutUtils(storeController storage.StoreController, log log.Logger)
// Below method will return image path including root dir, root dir is determined by splitting.
func (olu OciLayoutUtils) GetImageManifests(image string) ([]ispec.Descriptor, error) {
imageStore := olu.StoreController.GetImageStore(image)
buf, err := imageStore.GetIndexContent(image)
buf, err := imageStore.GetIndexContent(image)
if err != nil {
if goerrors.Is(errors.ErrRepoNotFound, err) {
olu.Log.Error().Err(err).Msg("index.json doesn't exist")
@ -50,6 +49,7 @@ func (olu OciLayoutUtils) GetImageManifests(image string) ([]ispec.Descriptor, e
if err := json.Unmarshal(buf, &index); err != nil {
olu.Log.Error().Err(err).Str("dir", path.Join(imageStore.RootDir(), image)).Msg("invalid JSON")
return nil, errors.ErrRepoNotFound
}
@ -108,14 +108,14 @@ func (olu OciLayoutUtils) IsValidImageFormat(image string) (bool, error) {
return false, err
}
for _, m := range manifests {
tag, ok := m.Annotations[ispec.AnnotationRefName]
for _, manifest := range manifests {
tag, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok && inputTag != "" && tag != inputTag {
continue
}
blobManifest, err := olu.GetImageBlobManifest(imageDir, m.Digest)
blobManifest, err := olu.GetImageBlobManifest(imageDir, manifest.Digest)
if err != nil {
return false, err
}
@ -129,6 +129,7 @@ func (olu OciLayoutUtils) IsValidImageFormat(image string) (bool, error) {
default:
olu.Log.Debug().Msg("image media type not supported for scanning")
return false, errors.ErrScanNotSupported
}
}
@ -151,7 +152,7 @@ func (olu OciLayoutUtils) GetImageTagsWithTimestamp(repo string) ([]TagInfo, err
for _, manifest := range manifests {
digest := manifest.Digest
v, ok := manifest.Annotations[ispec.AnnotationRefName]
val, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok {
imageBlobManifest, err := olu.GetImageBlobManifest(repo, digest)
if err != nil {
@ -175,7 +176,7 @@ func (olu OciLayoutUtils) GetImageTagsWithTimestamp(repo string) ([]TagInfo, err
timeStamp = time.Time{}
}
tagsInfo = append(tagsInfo, TagInfo{Name: v, Timestamp: timeStamp, Digest: digest.String()})
tagsInfo = append(tagsInfo, TagInfo{Name: val, Timestamp: timeStamp, Digest: digest.String()})
}
}

View File

@ -18,7 +18,7 @@ import (
)
func getRoutePrefix(name string) string {
names := strings.SplitN(name, "/", 2)
names := strings.SplitN(name, "/", 2) //nolint:gomnd
if len(names) != 2 { // nolint: gomnd
// it means route is of global storage e.g "centos:latest"
@ -104,8 +104,10 @@ func GetCVEInfo(storeController storage.StoreController, log log.Logger) (*CveIn
cveController.SubCveConfig = subCveConfig
return &CveInfo{Log: log, CveTrivyController: cveController, StoreController: storeController,
LayoutUtils: layoutUtils}, nil
return &CveInfo{
Log: log, CveTrivyController: cveController, StoreController: storeController,
LayoutUtils: layoutUtils,
}, nil
}
func (cveinfo CveInfo) GetTrivyContext(image string) *TrivyCtx {
@ -137,7 +139,7 @@ func (cveinfo CveInfo) GetTrivyContext(image string) *TrivyCtx {
return trivyCtx
}
func (cveinfo CveInfo) GetImageListForCVE(repo string, id string, imgStore storage.ImageStore,
func (cveinfo CveInfo) GetImageListForCVE(repo string, cvid string, imgStore storage.ImageStore,
trivyCtx *TrivyCtx) ([]*string, error) {
tags := make([]*string, 0)
@ -173,7 +175,7 @@ func (cveinfo CveInfo) GetImageListForCVE(repo string, id string, imgStore stora
for _, result := range report.Results {
for _, vulnerability := range result.Vulnerabilities {
if vulnerability.VulnerabilityID == id {
if vulnerability.VulnerabilityID == cvid {
copyImgTag := tag
tags = append(tags, &copyImgTag)

View File

@ -1,7 +1,7 @@
//go:build extended
// +build extended
// nolint: lll
// nolint:lll,gosimple
package cveinfo_test
import (
@ -48,10 +48,12 @@ type ImgWithFixedCVE struct {
ImgResults ImgResults `json:"data"`
}
//nolint:tagliatelle // graphQL schema
type ImgResults struct {
ImgResultForFixedCVE ImgResultForFixedCVE `json:"ImgResultForFixedCVE"`
}
//nolint:tagliatelle // graphQL schema
type ImgResultForFixedCVE struct {
Tags []TagInfo `json:"Tags"`
}
@ -61,15 +63,18 @@ type TagInfo struct {
Timestamp time.Time
}
//nolint:tagliatelle // graphQL schema
type ImgList struct {
CVEResultForImage CVEResultForImage `json:"CVEListForImage"`
}
//nolint:tagliatelle // graphQL schema
type CVEResultForImage struct {
Tag string `json:"Tag"`
CVEList []CVE `json:"CVEList"`
}
//nolint:tagliatelle // graphQL schema
type CVE struct {
ID string `json:"Id"`
Description string `json:"Description"`
@ -108,12 +113,12 @@ func testSetup() error {
func generateTestData() error { // nolint: gocyclo
// Image dir with no files
err := os.Mkdir(path.Join(dbDir, "zot-noindex-test"), 0755)
err := os.Mkdir(path.Join(dbDir, "zot-noindex-test"), 0o755)
if err != nil {
return err
}
err = os.Mkdir(path.Join(dbDir, "zot-nonreadable-test"), 0755)
err = os.Mkdir(path.Join(dbDir, "zot-nonreadable-test"), 0o755)
if err != nil {
return err
}
@ -126,17 +131,17 @@ func generateTestData() error { // nolint: gocyclo
return err
}
if err = ioutil.WriteFile(path.Join(dbDir, "zot-nonreadable-test", "index.json"), buf, 0111); err != nil {
if err = ioutil.WriteFile(path.Join(dbDir, "zot-nonreadable-test", "index.json"), buf, 0o111); err != nil {
return err
}
// Image dir with invalid index.json
err = os.Mkdir(path.Join(dbDir, "zot-squashfs-invalid-index"), 0755)
err = os.Mkdir(path.Join(dbDir, "zot-squashfs-invalid-index"), 0o755)
if err != nil {
return err
}
content := fmt.Sprintf(`{"schemaVersion": 2,"manifests"[{"mediaType": "application/vnd.oci.image.manifest.v1+json","digest": "sha256:2a9b097b4e4c613dd8185eba55163201a221909f3d430f8df87cd3639afc5929","size": 1240,"annotations": {"org.opencontainers.image.ref.name": "commit-aaa7c6e7-squashfs"},"platform": {"architecture": "amd64","os": "linux"}}]}`)
content := `{"schemaVersion": 2,"manifests"[{"mediaType": "application/vnd.oci.image.manifest.v1+json","digest": "sha256:2a9b097b4e4c613dd8185eba55163201a221909f3d430f8df87cd3639afc5929","size": 1240,"annotations": {"org.opencontainers.image.ref.name": "commit-aaa7c6e7-squashfs"},"platform": {"architecture": "amd64","os": "linux"}}]}`
err = makeTestFile(path.Join(dbDir, "zot-squashfs-invalid-index", "index.json"), content)
if err != nil {
@ -144,13 +149,12 @@ func generateTestData() error { // nolint: gocyclo
}
// Image dir with no blobs
err = os.Mkdir(path.Join(dbDir, "zot-squashfs-noblobs"), 0755)
err = os.Mkdir(path.Join(dbDir, "zot-squashfs-noblobs"), 0o755)
if err != nil {
return err
}
content = fmt.Sprintf(`{"schemaVersion":2,"manifests":[{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:2a9b097b4e4c613dd8185eba55163201a221909f3d430f8df87cd3639afc5929","size":1240,"annotations":{"org.opencontainers.image.ref.name":"commit-aaa7c6e7-squashfs"},"platform":{"architecture":"amd64","os":"linux"}}]}
`)
content = `{"schemaVersion":2,"manifests":[{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:2a9b097b4e4c613dd8185eba55163201a221909f3d430f8df87cd3639afc5929","size":1240,"annotations":{"org.opencontainers.image.ref.name":"commit-aaa7c6e7-squashfs"},"platform":{"architecture":"amd64","os":"linux"}}]}`
err = makeTestFile(path.Join(dbDir, "zot-squashfs-noblobs", "index.json"), content)
if err != nil {
@ -158,7 +162,7 @@ func generateTestData() error { // nolint: gocyclo
}
// Image dir with invalid blob
err = os.MkdirAll(path.Join(dbDir, "zot-squashfs-invalid-blob", "blobs/sha256"), 0755)
err = os.MkdirAll(path.Join(dbDir, "zot-squashfs-invalid-blob", "blobs/sha256"), 0o755)
if err != nil {
return err
}
@ -181,7 +185,7 @@ func generateTestData() error { // nolint: gocyclo
// Create a squashfs image
err = os.MkdirAll(path.Join(dbDir, "zot-squashfs-test", "blobs/sha256"), 0755)
err = os.MkdirAll(path.Join(dbDir, "zot-squashfs-test", "blobs/sha256"), 0o755)
if err != nil {
return err
}
@ -193,11 +197,11 @@ func generateTestData() error { // nolint: gocyclo
return err
}
if err = ioutil.WriteFile(path.Join(dbDir, "zot-squashfs-test", "oci-layout"), buf, 0644); err != nil { //nolint: gosec
if err = ioutil.WriteFile(path.Join(dbDir, "zot-squashfs-test", "oci-layout"), buf, 0o644); err != nil { //nolint: gosec
return err
}
err = os.Mkdir(path.Join(dbDir, "zot-squashfs-test", ".uploads"), 0755)
err = os.Mkdir(path.Join(dbDir, "zot-squashfs-test", ".uploads"), 0o755)
if err != nil {
return err
}
@ -253,7 +257,7 @@ func generateTestData() error { // nolint: gocyclo
// Create a image with invalid layer blob
err = os.MkdirAll(path.Join(dbDir, "zot-invalid-layer", "blobs/sha256"), 0755)
err = os.MkdirAll(path.Join(dbDir, "zot-invalid-layer", "blobs/sha256"), 0o755)
if err != nil {
return err
}
@ -281,7 +285,7 @@ func generateTestData() error { // nolint: gocyclo
// Create a image with no layer blob
err = os.MkdirAll(path.Join(dbDir, "zot-no-layer", "blobs/sha256"), 0755)
err = os.MkdirAll(path.Join(dbDir, "zot-no-layer", "blobs/sha256"), 0o755)
if err != nil {
return err
}
@ -311,7 +315,7 @@ func generateTestData() error { // nolint: gocyclo
}
func makeTestFile(fileName string, content string) error {
if err := ioutil.WriteFile(fileName, []byte(content), 0600); err != nil {
if err := ioutil.WriteFile(fileName, []byte(content), 0o600); err != nil {
panic(err)
}
@ -390,8 +394,9 @@ func TestCVESearch(t *testing.T) {
Path: htpasswdPath,
},
}
c := api.NewController(conf)
c.Config.Storage.RootDirectory = dbDir
ctlr := api.NewController(conf)
ctlr.Config.Storage.RootDirectory = dbDir
cveConfig := &extconf.CVEConfig{
UpdateInterval: updateDuration,
}
@ -399,12 +404,12 @@ func TestCVESearch(t *testing.T) {
CVE: cveConfig,
Enable: true,
}
c.Config.Extensions = &extconf.ExtensionConfig{
ctlr.Config.Extensions = &extconf.ExtensionConfig{
Search: searchConfig,
}
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -423,7 +428,7 @@ func TestCVESearch(t *testing.T) {
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
// without creds, should get access error
@ -464,9 +469,9 @@ func TestCVESearch(t *testing.T) {
So(err, ShouldBeNil)
So(len(cveResult.ImgList.CVEResultForImage.CVEList), ShouldNotBeZeroValue)
id := cveResult.ImgList.CVEResultForImage.CVEList[0].ID
cvid := cveResult.ImgList.CVEResultForImage.CVEList[0].ID
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + id + "\",image:\"zot-test\"){Tags{Name%20Timestamp}}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + cvid + "\",image:\"zot-test\"){Tags{Name%20Timestamp}}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
@ -475,7 +480,7 @@ func TestCVESearch(t *testing.T) {
So(err, ShouldBeNil)
So(len(imgFixedCVEResult.ImgResults.ImgResultForFixedCVE.Tags), ShouldEqual, 0)
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + id + "\",image:\"zot-cve-test\"){Tags{Name%20Timestamp}}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + cvid + "\",image:\"zot-cve-test\"){Tags{Name%20Timestamp}}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
@ -483,7 +488,7 @@ func TestCVESearch(t *testing.T) {
So(err, ShouldBeNil)
So(len(imgFixedCVEResult.ImgResults.ImgResultForFixedCVE.Tags), ShouldEqual, 0)
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + id + "\",image:\"zot-test\"){Tags{Name%20Timestamp}}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + cvid + "\",image:\"zot-test\"){Tags{Name%20Timestamp}}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
@ -500,7 +505,7 @@ func TestCVESearch(t *testing.T) {
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + id + "\",image:\"zot-squashfs-noindex\"){Tags{Name%20Timestamp}}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + cvid + "\",image:\"zot-squashfs-noindex\"){Tags{Name%20Timestamp}}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
@ -508,7 +513,7 @@ func TestCVESearch(t *testing.T) {
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + id + "\",image:\"zot-squashfs-invalid-index\"){Tags{Name%20Timestamp}}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + cvid + "\",image:\"zot-squashfs-invalid-index\"){Tags{Name%20Timestamp}}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
@ -516,11 +521,11 @@ func TestCVESearch(t *testing.T) {
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + id + "\",image:\"zot-squashfs-noblob\"){Tags{Name%20Timestamp}}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + cvid + "\",image:\"zot-squashfs-noblob\"){Tags{Name%20Timestamp}}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + id + "\",image:\"zot-squashfs-test\"){Tags{Name%20Timestamp}}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + cvid + "\",image:\"zot-squashfs-test\"){Tags{Name%20Timestamp}}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
@ -528,7 +533,7 @@ func TestCVESearch(t *testing.T) {
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + id + "\",image:\"zot-squashfs-invalid-blob\"){Tags{Name%20Timestamp}}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListWithCVEFixed(id:\"" + cvid + "\",image:\"zot-squashfs-invalid-blob\"){Tags{Name%20Timestamp}}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
@ -597,7 +602,7 @@ func TestCVESearch(t *testing.T) {
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 422)
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListForCVE(id:\"" + id + "\"){Name%20Tags}}")
resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + "/query?query={ImageListForCVE(id:\"" + cvid + "\"){Name%20Tags}}")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
})
@ -617,7 +622,9 @@ func TestCVEConfig(t *testing.T) {
Path: htpasswdPath,
},
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
firstDir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
@ -635,16 +642,17 @@ func TestCVEConfig(t *testing.T) {
panic(err)
}
c.Config.Storage.RootDirectory = firstDir
ctlr.Config.Storage.RootDirectory = firstDir
subPaths := make(map[string]config.StorageConfig)
subPaths["/a"] = config.StorageConfig{
RootDirectory: secondDir,
}
c.Config.Storage.SubPaths = subPaths
ctlr.Config.Storage.SubPaths = subPaths
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -676,7 +684,7 @@ func TestCVEConfig(t *testing.T) {
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
})
}

View File

@ -20,6 +20,7 @@ type CveTrivyController struct {
DefaultCveConfig *TrivyCtx
SubCveConfig map[string]*TrivyCtx
}
type TrivyCtx struct {
Input string
Ctx *cli.Context

View File

@ -27,21 +27,21 @@ func (digestinfo DigestInfo) GetImageTagsByDigest(repo string, digest string) ([
uniqueTags := []*string{}
manifests, err := digestinfo.LayoutUtils.GetImageManifests(repo)
if err != nil {
digestinfo.Log.Error().Err(err).Msg("unable to read image manifests")
return uniqueTags, err
}
for _, manifest := range manifests {
imageDigest := manifest.Digest
v, ok := manifest.Annotations[ispec.AnnotationRefName]
val, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok {
imageBlobManifest, err := digestinfo.LayoutUtils.GetImageBlobManifest(repo, imageDigest)
if err != nil {
digestinfo.Log.Error().Err(err).Msg("unable to read image blob manifest")
return uniqueTags, err
}
@ -50,20 +50,20 @@ func (digestinfo DigestInfo) GetImageTagsByDigest(repo string, digest string) ([
// Check the image manigest in index.json matches the search digest
// This is a blob with mediaType application/vnd.oci.image.manifest.v1+json
if strings.Contains(manifest.Digest.String(), digest) {
tags = append(tags, &v)
tags = append(tags, &val)
}
// Check the image config matches the search digest
// This is a blob with mediaType application/vnd.oci.image.config.v1+json
if strings.Contains(imageBlobManifest.Config.Digest.Algorithm+":"+imageBlobManifest.Config.Digest.Hex, digest) {
tags = append(tags, &v)
tags = append(tags, &val)
}
// Check to see if the individual layers in the oci image manifest match the digest
// These are blobs with mediaType application/vnd.oci.image.layer.v1.tar+gzip
for _, layer := range imageBlobManifest.Layers {
if strings.Contains(layer.Digest.Algorithm+":"+layer.Digest.Hex, digest) {
tags = append(tags, &v)
tags = append(tags, &val)
}
}

View File

@ -36,10 +36,12 @@ type ImgResponseForDigest struct {
Errors []ErrorGQL `json:"errors"`
}
//nolint:tagliatelle // graphQL schema
type ImgListForDigest struct {
Images []ImgInfo `json:"ImageListForDigest"`
}
//nolint:tagliatelle // graphQL schema
type ImgInfo struct {
Name string `json:"Name"`
Tags []string `json:"Tags"`
@ -51,8 +53,7 @@ type ErrorGQL struct {
}
func init() {
err := testSetup()
if err != nil {
if err := testSetup(); err != nil {
panic(err)
}
}
@ -79,7 +80,7 @@ func testSetup() error {
// zot-cve-test 0.0.1 63a795ca 8dd57e17 75MB
// 7a0437f0 75MB
err = os.Mkdir(subDir+"/a", 0700)
err = os.Mkdir(subDir+"/a", 0o700)
if err != nil {
return err
}
@ -146,11 +147,11 @@ func TestDigestSearchHTTP(t *testing.T) {
Search: &extconf.SearchConfig{Enable: true},
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -167,7 +168,7 @@ func TestDigestSearchHTTP(t *testing.T) {
// shut down server
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
resp, err := resty.R().Get(baseURL + "/v2/")
@ -273,7 +274,7 @@ func TestDigestSearchHTTPSubPaths(t *testing.T) {
Search: &extconf.SearchConfig{Enable: true},
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
globalDir, err := ioutil.TempDir("", "digest_test")
if err != nil {
@ -281,17 +282,17 @@ func TestDigestSearchHTTPSubPaths(t *testing.T) {
}
defer os.RemoveAll(globalDir)
c.Config.Storage.RootDirectory = globalDir
ctlr.Config.Storage.RootDirectory = globalDir
subPathMap := make(map[string]config.StorageConfig)
subPathMap["/a"] = config.StorageConfig{RootDirectory: subRootDir}
c.Config.Storage.SubPaths = subPathMap
ctlr.Config.Storage.SubPaths = subPathMap
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -308,7 +309,7 @@ func TestDigestSearchHTTPSubPaths(t *testing.T) {
// shut down server
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
resp, err := resty.R().Get(baseURL + "/v2/")
@ -347,11 +348,11 @@ func TestDigestSearchDisabled(t *testing.T) {
Search: &extconf.SearchConfig{Enable: false},
}
c := api.NewController(conf)
ctlr := api.NewController(conf)
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -368,7 +369,7 @@ func TestDigestSearchDisabled(t *testing.T) {
// shut down server
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
resp, err := resty.R().Get(baseURL + "/v2/")

View File

@ -148,7 +148,7 @@ func (r *queryResolver) CVEListForImage(ctx context.Context, image string) (*CVE
return &CVEResultForImage{Tag: &copyImgTag, CVEList: cveids}, nil
}
func (r *queryResolver) ImageListForCve(ctx context.Context, id string) ([]*ImgResultForCve, error) {
func (r *queryResolver) ImageListForCve(ctx context.Context, cvid string) ([]*ImgResultForCve, error) {
finalCveResult := []*ImgResultForCve{}
r.log.Info().Msg("extracting repositories")
@ -166,7 +166,7 @@ func (r *queryResolver) ImageListForCve(ctx context.Context, id string) ([]*ImgR
r.cveInfo.Log.Info().Msg("scanning each global repository")
cveResult, err := r.getImageListForCVE(repoList, id, defaultStore, defaultTrivyCtx)
cveResult, err := r.getImageListForCVE(repoList, cvid, defaultStore, defaultTrivyCtx)
if err != nil {
r.log.Error().Err(err).Msg("error getting cve list for global repositories")
@ -187,7 +187,7 @@ func (r *queryResolver) ImageListForCve(ctx context.Context, id string) ([]*ImgR
subTrivyCtx := r.cveInfo.CveTrivyController.SubCveConfig[route]
subCveResult, err := r.getImageListForCVE(subRepoList, id, store, subTrivyCtx)
subCveResult, err := r.getImageListForCVE(subRepoList, cvid, store, subTrivyCtx)
if err != nil {
r.log.Error().Err(err).Msg("unable to get cve result for sub repositories")
@ -200,7 +200,7 @@ func (r *queryResolver) ImageListForCve(ctx context.Context, id string) ([]*ImgR
return finalCveResult, nil
}
func (r *queryResolver) getImageListForCVE(repoList []string, id string, imgStore storage.ImageStore,
func (r *queryResolver) getImageListForCVE(repoList []string, cvid string, imgStore storage.ImageStore,
trivyCtx *cveinfo.TrivyCtx) ([]*ImgResultForCve, error) {
cveResult := []*ImgResultForCve{}
@ -209,7 +209,7 @@ func (r *queryResolver) getImageListForCVE(repoList []string, id string, imgStor
name := repo
tags, err := r.cveInfo.GetImageListForCVE(repo, id, imgStore, trivyCtx)
tags, err := r.cveInfo.GetImageListForCVE(repo, cvid, imgStore, trivyCtx)
if err != nil {
r.log.Error().Err(err).Msg("error getting tag")
@ -224,7 +224,7 @@ func (r *queryResolver) getImageListForCVE(repoList []string, id string, imgStor
return cveResult, nil
}
func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, id string, image string) (*ImgResultForFixedCve, error) { // nolint: lll
func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, cvid string, image string) (*ImgResultForFixedCve, error) { // nolint: lll
imgResultForFixedCVE := &ImgResultForFixedCve{}
r.log.Info().Str("image", image).Msg("extracting list of tags available in image")
@ -270,7 +270,7 @@ func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, id string, im
for _, result := range report.Results {
for _, vulnerability := range result.Vulnerabilities {
if vulnerability.VulnerabilityID == id {
if vulnerability.VulnerabilityID == cvid {
hasCVE = true
break
@ -292,7 +292,7 @@ func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, id string, im
finalTagList = getGraphqlCompatibleTags(fixedTags)
} else {
r.log.Info().Str("image", image).Str("cve-id", id).Msg("image does not contain any tag that have given cve")
r.log.Info().Str("image", image).Str("cve-id", cvid).Msg("image does not contain any tag that have given cve")
finalTagList = getGraphqlCompatibleTags(tagsInfo)
}
@ -302,7 +302,7 @@ func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, id string, im
return imgResultForFixedCVE, nil
}
func (r *queryResolver) ImageListForDigest(ctx context.Context, id string) ([]*ImgResultForDigest, error) {
func (r *queryResolver) ImageListForDigest(ctx context.Context, digestID string) ([]*ImgResultForDigest, error) {
imgResultForDigest := []*ImgResultForDigest{}
r.log.Info().Msg("extracting repositories")
@ -318,7 +318,7 @@ func (r *queryResolver) ImageListForDigest(ctx context.Context, id string) ([]*I
r.log.Info().Msg("scanning each global repository")
partialImgResultForDigest, err := r.getImageListForDigest(repoList, id)
partialImgResultForDigest, err := r.getImageListForDigest(repoList, digestID)
if err != nil {
r.log.Error().Err(err).Msg("unable to get image and tag list for global repositories")
@ -336,7 +336,7 @@ func (r *queryResolver) ImageListForDigest(ctx context.Context, id string) ([]*I
return imgResultForDigest, err
}
partialImgResultForDigest, err = r.getImageListForDigest(subRepoList, id)
partialImgResultForDigest, err = r.getImageListForDigest(subRepoList, digestID)
if err != nil {
r.log.Error().Err(err).Msg("unable to get image and tag list for sub-repositories")

View File

@ -16,7 +16,7 @@ type PostHandler struct {
Log log.Logger
}
func (h *PostHandler) Handler(w http.ResponseWriter, r *http.Request) {
func (h *PostHandler) Handler(response http.ResponseWriter, request *http.Request) {
var credentialsFile CredentialsFile
var err error
@ -25,7 +25,7 @@ func (h *PostHandler) Handler(w http.ResponseWriter, r *http.Request) {
credentialsFile, err = getFileCredentials(h.Cfg.CredentialsFile)
if err != nil {
h.Log.Error().Err(err).Msgf("sync http handler: couldn't get registry credentials from %s", h.Cfg.CredentialsFile)
WriteData(w, http.StatusInternalServerError, err.Error())
WriteData(response, http.StatusInternalServerError, err.Error())
return
}
@ -33,7 +33,7 @@ func (h *PostHandler) Handler(w http.ResponseWriter, r *http.Request) {
localCtx, policyCtx, err := getLocalContexts(h.Log)
if err != nil {
WriteData(w, http.StatusInternalServerError, err.Error())
WriteData(response, http.StatusInternalServerError, err.Error())
return
}
@ -42,7 +42,7 @@ func (h *PostHandler) Handler(w http.ResponseWriter, r *http.Request) {
uuid, err := guuid.NewV4()
if err != nil {
WriteData(w, http.StatusInternalServerError, err.Error())
WriteData(response, http.StatusInternalServerError, err.Error())
return
}
@ -51,12 +51,14 @@ func (h *PostHandler) Handler(w http.ResponseWriter, r *http.Request) {
// if content not provided, don't run periodically sync
if len(regCfg.Content) == 0 {
h.Log.Info().Msgf("sync config content not configured for %s, will not run periodically sync", regCfg.URL)
continue
}
// if pollInterval is not provided, don't run periodically sync
if regCfg.PollInterval == 0 {
h.Log.Warn().Msgf("sync config PollInterval not configured for %s, will not run periodically sync", regCfg.URL)
continue
}
@ -65,13 +67,13 @@ func (h *PostHandler) Handler(w http.ResponseWriter, r *http.Request) {
if err := syncRegistry(regCfg, h.StoreController, h.Log, localCtx, policyCtx,
credentialsFile[upstreamRegistryName], uuid.String()); err != nil {
h.Log.Err(err).Msg("sync http handler: error while syncing in")
WriteData(w, http.StatusInternalServerError, err.Error())
WriteData(response, http.StatusInternalServerError, err.Error())
return
}
}
WriteData(w, http.StatusOK, "")
WriteData(response, http.StatusOK, "")
}
func WriteData(w http.ResponseWriter, status int, msg string) {

View File

@ -27,6 +27,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
credentialsFile, err = getFileCredentials(cfg.CredentialsFile)
if err != nil {
log.Error().Err(err).Msgf("couldn't get registry credentials from %s", cfg.CredentialsFile)
return err
}
}
@ -48,6 +49,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
for _, regCfg := range cfg.Registries {
if !regCfg.OnDemand {
log.Info().Msgf("skipping syncing on demand from %s, onDemand flag is false", regCfg.URL)
continue
}
@ -57,6 +59,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
if len(repos) == 0 {
log.Info().Msgf("skipping syncing on demand %s from %s registry because it's filtered out by content config",
repo, regCfg.URL)
continue
}
}
@ -71,6 +74,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
upstreamRepoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", upstreamRegistryName, repo))
if err != nil {
log.Error().Err(err).Msgf("error parsing repository reference %s/%s", upstreamRegistryName, repo)
return err
}
@ -78,6 +82,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
if err != nil {
log.Error().Err(err).Msgf("error creating a reference for repository %s and tag %q",
upstreamRepoRef.Name(), tag)
return err
}
@ -85,6 +90,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
if err != nil {
log.Error().Err(err).Msgf("error creating docker reference for repository %s and tag %q",
upstreamRepoRef.Name(), tag)
return err
}
@ -92,8 +98,9 @@ func OneImage(cfg Config, storeController storage.StoreController,
localRepo := path.Join(imageStore.RootDir(), imageName, SyncBlobUploadDir, uuid.String(), imageName)
if err = os.MkdirAll(localRepo, 0755); err != nil {
if err = os.MkdirAll(localRepo, storage.DefaultDirPerms); err != nil {
log.Error().Err(err).Str("dir", localRepo).Msg("couldn't create temporary dir")
return err
}
@ -104,6 +111,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
localRef, err := layout.ParseReference(localTaggedRepo)
if err != nil {
log.Error().Err(err).Msgf("cannot obtain a valid image reference for reference %q", localRepo)
return err
}
@ -118,6 +126,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
if err = retry.RetryIfNecessary(context.Background(), func() error {
_, copyErr = copy.Image(context.Background(), policyCtx, localRef, upstreamRef, &options)
return err
}, retryOptions); copyErr != nil {
log.Error().Err(copyErr).Msgf("error while copying image %s to %s",
@ -129,6 +138,7 @@ func OneImage(cfg Config, storeController storage.StoreController,
if err != nil {
log.Error().Err(err).Msgf("error while pushing synced cached image %s",
localTaggedRepo)
return err
}

View File

@ -90,17 +90,19 @@ func getUpstreamCatalog(regCfg *RegistryConfig, credentials Credentials, log log
caCert, err := ioutil.ReadFile(caCertPath)
if err != nil {
log.Error().Err(err).Msg("couldn't read CA certificate")
return c, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
client.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool})
client.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
cert, err := tls.LoadX509KeyPair(clientCert, clientKey)
if err != nil {
log.Error().Err(err).Msg("couldn't read certificates key pairs")
return c, err
}
@ -120,18 +122,21 @@ func getUpstreamCatalog(regCfg *RegistryConfig, credentials Credentials, log log
resp, err := client.R().SetHeader("Content-Type", "application/json").Get(registryCatalogURL)
if err != nil {
log.Err(err).Msgf("couldn't query %s", registryCatalogURL)
return c, err
}
if resp.IsError() {
log.Error().Msgf("couldn't query %s, status code: %d, body: %s", registryCatalogURL,
resp.StatusCode(), resp.Body())
return c, errors.ErrSyncMissingCatalog
}
err = json.Unmarshal(resp.Body(), &c)
if err != nil {
log.Err(err).Str("body", string(resp.Body())).Msg("couldn't unmarshal registry's catalog")
return c, err
}
@ -171,19 +176,19 @@ func filterImagesByTagRegex(upstreamReferences *[]types.ImageReference, content
return err
}
n := 0
numTags := 0
for _, ref := range refs {
tagged := getTagFromRef(ref, log)
if tagged != nil {
if tagReg.MatchString(tagged.Tag()) {
refs[n] = ref
n++
refs[numTags] = ref
numTags++
}
}
}
refs = refs[:n]
refs = refs[:numTags]
}
*upstreamReferences = refs
@ -202,20 +207,20 @@ func filterImagesBySemver(upstreamReferences *[]types.ImageReference, content Co
if content.Tags.Semver != nil && *content.Tags.Semver {
log.Info().Msg("start filtering using semver compliant rule")
n := 0
numTags := 0
for _, ref := range refs {
tagged := getTagFromRef(ref, log)
if tagged != nil {
_, ok := semver.NewVersion(tagged.Tag())
if ok == nil {
refs[n] = ref
n++
refs[numTags] = ref
numTags++
}
}
}
refs = refs[:n]
refs = refs[:numTags]
}
*upstreamReferences = refs
@ -230,12 +235,14 @@ func imagesToCopyFromUpstream(registryName string, repos []string, upstreamCtx *
repoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", registryName, repoName))
if err != nil {
log.Error().Err(err).Msgf("couldn't parse repository reference: %s", repoRef)
return nil, err
}
tags, err := getImageTags(context.Background(), upstreamCtx, repoRef)
if err != nil {
log.Error().Err(err).Msgf("couldn't fetch tags for %s", repoRef)
return nil, err
}
@ -243,6 +250,7 @@ func imagesToCopyFromUpstream(registryName string, repos []string, upstreamCtx *
taggedRef, err := reference.WithTag(repoRef, tag)
if err != nil {
log.Err(err).Msgf("error creating a reference for repository %s and tag %q", repoRef.Name(), tag)
return nil, err
}
@ -250,6 +258,7 @@ func imagesToCopyFromUpstream(registryName string, repos []string, upstreamCtx *
if err != nil {
log.Err(err).Msgf("cannot obtain a valid image reference for transport %q and reference %s",
docker.Transport.Name(), taggedRef.String())
return nil, err
}
@ -329,9 +338,11 @@ func syncRegistry(regCfg RegistryConfig, storeController storage.StoreController
if err = retry.RetryIfNecessary(context.Background(), func() error {
catalog, err = getUpstreamCatalog(&regCfg, credentials, log)
return err
}, retryOptions); err != nil {
log.Error().Err(err).Msg("error while getting upstream catalog, retrying...")
return err
}
@ -352,15 +363,18 @@ func syncRegistry(regCfg RegistryConfig, storeController storage.StoreController
if err = retry.RetryIfNecessary(context.Background(), func() error {
refs, err := imagesToCopyFromUpstream(upstreamRegistryName, r, upstreamCtx, regCfg.Content[id], log)
images = append(images, refs...)
return err
}, retryOptions); err != nil {
log.Error().Err(err).Msg("error while getting images references from upstream, retrying...")
return err
}
}
if len(images) == 0 {
log.Info().Msg("no images to copy, no need to sync")
return nil
}
@ -374,8 +388,9 @@ func syncRegistry(regCfg RegistryConfig, storeController storage.StoreController
localRepo := path.Join(imageStore.RootDir(), imageName, SyncBlobUploadDir, uuid, imageName)
if err = os.MkdirAll(localRepo, 0755); err != nil {
if err = os.MkdirAll(localRepo, storage.DefaultDirPerms); err != nil {
log.Error().Err(err).Str("dir", localRepo).Msg("couldn't create temporary dir")
return err
}
@ -388,6 +403,7 @@ func syncRegistry(regCfg RegistryConfig, storeController storage.StoreController
localRef, err := layout.ParseReference(localTaggedRepo)
if err != nil {
log.Error().Err(err).Msgf("Cannot obtain a valid image reference for reference %q", localTaggedRepo)
return err
}
@ -396,10 +412,12 @@ func syncRegistry(regCfg RegistryConfig, storeController storage.StoreController
if err = retry.RetryIfNecessary(context.Background(), func() error {
_, err = copy.Image(context.Background(), policyCtx, localRef, upstreamRef, &options)
return err
}, retryOptions); err != nil {
log.Error().Err(err).Msgf("error while copying image %s:%s to %s",
upstreamRef.DockerReference().Name(), upstreamTaggedRef.Tag(), localTaggedRepo)
return err
}
@ -409,6 +427,7 @@ func syncRegistry(regCfg RegistryConfig, storeController storage.StoreController
if err != nil {
log.Error().Err(err).Msgf("error while pushing synced cached image %s",
localTaggedRepo)
return err
}
}
@ -433,13 +452,14 @@ func getLocalContexts(log log.Logger) (*types.SystemContext, *signature.PolicyCo
policyContext, err := signature.NewPolicyContext(policy)
if err != nil {
log.Error().Err(err).Msg("couldn't create policy context")
return &types.SystemContext{}, &signature.PolicyContext{}, err
}
return localCtx, policyContext, nil
}
func Run(cfg Config, storeController storage.StoreController, wg *goSync.WaitGroup, logger log.Logger) error {
func Run(cfg Config, storeController storage.StoreController, wtgrp *goSync.WaitGroup, logger log.Logger) error {
var credentialsFile CredentialsFile
var err error
@ -448,6 +468,7 @@ func Run(cfg Config, storeController storage.StoreController, wg *goSync.WaitGro
credentialsFile, err = getFileCredentials(cfg.CredentialsFile)
if err != nil {
logger.Error().Err(err).Msgf("couldn't get registry credentials from %s", cfg.CredentialsFile)
return err
}
}
@ -467,38 +488,40 @@ func Run(cfg Config, storeController storage.StoreController, wg *goSync.WaitGro
// if content not provided, don't run periodically sync
if len(regCfg.Content) == 0 {
logger.Info().Msgf("sync config content not configured for %s, will not run periodically sync", regCfg.URL)
continue
}
// if pollInterval is not provided, don't run periodically sync
if regCfg.PollInterval == 0 {
logger.Warn().Msgf("sync config PollInterval not configured for %s, will not run periodically sync", regCfg.URL)
continue
}
ticker := time.NewTicker(regCfg.PollInterval)
// fork a new zerolog child to avoid data race
l := log.Logger{Logger: logger.With().Caller().Timestamp().Logger()}
tlogger := log.Logger{Logger: logger.With().Caller().Timestamp().Logger()}
upstreamRegistry := strings.Replace(strings.Replace(regCfg.URL, "http://", "", 1), "https://", "", 1)
// schedule each registry sync
go func(regCfg RegistryConfig, l log.Logger) {
go func(regCfg RegistryConfig, logger log.Logger) {
// run on intervals
for ; true; <-ticker.C {
// increment reference since will be busy, so shutdown has to wait
wg.Add(1)
wtgrp.Add(1)
if err := syncRegistry(regCfg, storeController, l, localCtx, policyCtx,
if err := syncRegistry(regCfg, storeController, logger, localCtx, policyCtx,
credentialsFile[upstreamRegistry], uuid.String()); err != nil {
l.Error().Err(err).Msg("sync exited with error, stopping it...")
logger.Error().Err(err).Msg("sync exited with error, stopping it...")
ticker.Stop()
}
// mark as done after a single sync run
wg.Done()
wtgrp.Done()
}
}(regCfg, l)
}(regCfg, tlogger)
}
logger.Info().Msg("finished setting up sync")

View File

@ -54,17 +54,17 @@ func TestSyncInternal(t *testing.T) {
_, err = getFileCredentials("/path/to/inexistent/file")
So(err, ShouldNotBeNil)
f, err := ioutil.TempFile("", "sync-credentials-")
tempFile, err := ioutil.TempFile("", "sync-credentials-")
if err != nil {
panic(err)
}
content := []byte(`{`)
if err := ioutil.WriteFile(f.Name(), content, 0600); err != nil {
if err := ioutil.WriteFile(tempFile.Name(), content, 0o600); err != nil {
panic(err)
}
_, err = getFileCredentials(f.Name())
_, err = getFileCredentials(tempFile.Name())
So(err, ShouldNotBeNil)
srcCtx := &types.SystemContext{}
@ -80,7 +80,7 @@ func TestSyncInternal(t *testing.T) {
dockerRef, err := docker.NewReference(taggedRef)
So(err, ShouldBeNil)
//tag := getTagFromRef(dockerRef, log.NewLogger("", ""))
// tag := getTagFromRef(dockerRef, log.NewLogger("", ""))
So(getTagFromRef(dockerRef, log.NewLogger("debug", "")), ShouldNotBeNil)
@ -133,7 +133,7 @@ func TestSyncInternal(t *testing.T) {
panic(err)
}
if err := os.WriteFile(path.Join(badCertsDir, "ca.crt"), []byte("certificate"), 0755); err != nil {
if err := os.WriteFile(path.Join(badCertsDir, "ca.crt"), []byte("certificate"), 0o600); err != nil {
panic(err)
}
@ -217,9 +217,9 @@ func TestSyncInternal(t *testing.T) {
So(err, ShouldNotBeNil)
testRootDir := path.Join(imageStore.RootDir(), testImage, SyncBlobUploadDir)
//testImagePath := path.Join(testRootDir, testImage)
// testImagePath := path.Join(testRootDir, testImage)
err = os.MkdirAll(testRootDir, 0755)
err = os.MkdirAll(testRootDir, 0o755)
if err != nil {
panic(err)
}
@ -239,7 +239,7 @@ func TestSyncInternal(t *testing.T) {
panic(err)
}
if err := os.Chmod(storageDir, 0000); err != nil {
if err := os.Chmod(storageDir, 0o000); err != nil {
panic(err)
}
@ -250,12 +250,12 @@ func TestSyncInternal(t *testing.T) {
ShouldPanic)
}
if err := os.Chmod(storageDir, 0755); err != nil {
if err := os.Chmod(storageDir, 0o755); err != nil {
panic(err)
}
if err := os.Chmod(path.Join(testRootDir, testImage, "blobs", "sha256",
manifest.Layers[0].Digest.Hex()), 0000); err != nil {
manifest.Layers[0].Digest.Hex()), 0o000); err != nil {
panic(err)
}
@ -263,25 +263,25 @@ func TestSyncInternal(t *testing.T) {
So(err, ShouldNotBeNil)
if err := os.Chmod(path.Join(testRootDir, testImage, "blobs", "sha256",
manifest.Layers[0].Digest.Hex()), 0755); err != nil {
manifest.Layers[0].Digest.Hex()), 0o755); err != nil {
panic(err)
}
cachedManifestConfigPath := path.Join(imageStore.RootDir(), testImage, SyncBlobUploadDir,
testImage, "blobs", "sha256", manifest.Config.Digest.Hex())
if err := os.Chmod(cachedManifestConfigPath, 0000); err != nil {
if err := os.Chmod(cachedManifestConfigPath, 0o000); err != nil {
panic(err)
}
err = pushSyncedLocalImage(testImage, testImageTag, "", storeController, log)
So(err, ShouldNotBeNil)
if err := os.Chmod(cachedManifestConfigPath, 0755); err != nil {
if err := os.Chmod(cachedManifestConfigPath, 0o755); err != nil {
panic(err)
}
manifestConfigPath := path.Join(imageStore.RootDir(), testImage, "blobs", "sha256", manifest.Config.Digest.Hex())
if err := os.MkdirAll(manifestConfigPath, 0000); err != nil {
if err := os.MkdirAll(manifestConfigPath, 0o000); err != nil {
panic(err)
}
@ -295,7 +295,7 @@ func TestSyncInternal(t *testing.T) {
mDigest := godigest.FromBytes(manifestContent)
manifestPath := path.Join(imageStore.RootDir(), testImage, "blobs", mDigest.Algorithm().String(), mDigest.Encoded())
if err := os.MkdirAll(manifestPath, 0000); err != nil {
if err := os.MkdirAll(manifestPath, 0o000); err != nil {
panic(err)
}

View File

@ -96,7 +96,7 @@ func startUpstreamServer(secure, basicAuth bool) (*api.Controller, string, strin
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
client.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool})
client.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
cert, err := tls.LoadX509KeyPair("../../../test/data/client.cert", "../../../test/data/client.key")
if err != nil {
@ -132,11 +132,11 @@ func startUpstreamServer(secure, basicAuth bool) (*api.Controller, string, strin
srcConfig.Storage.RootDirectory = srcDir
sc := api.NewController(srcConfig)
sctlr := api.NewController(srcConfig)
go func() {
// this blocks
if err := sc.Run(); err != nil {
if err := sctlr.Run(); err != nil {
return
}
}()
@ -151,7 +151,7 @@ func startUpstreamServer(secure, basicAuth bool) (*api.Controller, string, strin
time.Sleep(100 * time.Millisecond)
}
return sc, srcBaseURL, srcDir, htpasswdPath, client
return sctlr, srcBaseURL, srcDir, htpasswdPath, client
}
func startDownstreamServer(secure bool, syncConfig *sync.Config) (*api.Controller, string, string, *resty.Client) {
@ -179,7 +179,7 @@ func startDownstreamServer(secure bool, syncConfig *sync.Config) (*api.Controlle
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
client.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool})
client.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
cert, err := tls.LoadX509KeyPair("../../../test/data/client.cert", "../../../test/data/client.key")
if err != nil {
@ -204,11 +204,11 @@ func startDownstreamServer(secure bool, syncConfig *sync.Config) (*api.Controlle
destConfig.Extensions.Search = nil
destConfig.Extensions.Sync = syncConfig
dc := api.NewController(destConfig)
dctlr := api.NewController(destConfig)
go func() {
// this blocks
if err := dc.Run(); err != nil {
if err := dctlr.Run(); err != nil {
return
}
}()
@ -223,7 +223,7 @@ func startDownstreamServer(secure bool, syncConfig *sync.Config) (*api.Controlle
time.Sleep(100 * time.Millisecond)
}
return dc, destBaseURL, destDir, client
return dctlr, destBaseURL, destDir, client
}
func TestSyncOnDemand(t *testing.T) {
@ -257,13 +257,14 @@ func TestSyncOnDemand(t *testing.T) {
}
syncConfig := &sync.Config{
Registries: []sync.RegistryConfig{syncRegistryConfig}}
Registries: []sync.RegistryConfig{syncRegistryConfig},
}
dc, destBaseURL, destDir, destClient := startDownstreamServer(false, syncConfig)
dctlr, destBaseURL, destDir, destClient := startDownstreamServer(false, syncConfig)
defer os.RemoveAll(destDir)
defer func() {
dc.Shutdown()
dctlr.Shutdown()
}()
var srcTagsList TagsList
@ -286,7 +287,7 @@ func TestSyncOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 404)
err = os.Chmod(path.Join(destDir, testImage), 0000)
err = os.Chmod(path.Join(destDir, testImage), 0o000)
if err != nil {
panic(err)
}
@ -295,7 +296,7 @@ func TestSyncOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 500)
err = os.Chmod(path.Join(destDir, testImage), 0755)
err = os.Chmod(path.Join(destDir, testImage), 0o755)
if err != nil {
panic(err)
}
@ -304,7 +305,7 @@ func TestSyncOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 404)
err = os.Chmod(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0000)
err = os.Chmod(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0o000)
if err != nil {
panic(err)
}
@ -317,12 +318,12 @@ func TestSyncOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 404)
err = os.Chmod(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0755)
err = os.Chmod(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0o755)
if err != nil {
panic(err)
}
err = os.MkdirAll(path.Join(destDir, testImage, "blobs"), 0000)
err = os.MkdirAll(path.Join(destDir, testImage, "blobs"), 0o000)
if err != nil {
panic(err)
}
@ -331,7 +332,7 @@ func TestSyncOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 404)
err = os.Chmod(path.Join(destDir, testImage, "blobs"), 0755)
err = os.Chmod(path.Join(destDir, testImage, "blobs"), 0o755)
if err != nil {
panic(err)
}
@ -357,11 +358,11 @@ func TestSync(t *testing.T) {
Convey("Verify sync feature", t, func() {
updateDuration, _ := time.ParseDuration("30m")
sc, srcBaseURL, srcDir, _, srcClient := startUpstreamServer(false, false)
sctlr, srcBaseURL, srcDir, _, srcClient := startUpstreamServer(false, false)
defer os.RemoveAll(srcDir)
defer func() {
sc.Shutdown()
sctlr.Shutdown()
}()
regex := ".*"
@ -569,7 +570,7 @@ func TestSyncPermsDenied(t *testing.T) {
dc.Shutdown()
}()
err := os.Chmod(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0000)
err := os.Chmod(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0o000)
if err != nil {
panic(err)
}
@ -776,8 +777,10 @@ func TestSyncBasicAuth(t *testing.T) {
CertDir: "",
}
syncConfig := &sync.Config{CredentialsFile: credentialsFile,
Registries: []sync.RegistryConfig{syncRegistryConfig}}
syncConfig := &sync.Config{
CredentialsFile: credentialsFile,
Registries: []sync.RegistryConfig{syncRegistryConfig},
}
dc, destBaseURL, destDir, destClient := startDownstreamServer(false, syncConfig)
defer os.RemoveAll(destDir)
@ -872,20 +875,22 @@ func TestSyncBasicAuth(t *testing.T) {
destConfig.Extensions = &extconf.ExtensionConfig{}
destConfig.Extensions.Search = nil
destConfig.Extensions.Sync = &sync.Config{CredentialsFile: credentialsFile,
Registries: []sync.RegistryConfig{syncRegistryConfig}}
destConfig.Extensions.Sync = &sync.Config{
CredentialsFile: credentialsFile,
Registries: []sync.RegistryConfig{syncRegistryConfig},
}
dc := api.NewController(destConfig)
dctlr := api.NewController(destConfig)
go func() {
// this blocks
if err := dc.Run(); err != nil {
if err := dctlr.Run(); err != nil {
return
}
}()
defer func() {
dc.Shutdown()
dctlr.Shutdown()
}()
// wait till ready
@ -915,11 +920,11 @@ func TestSyncBasicAuth(t *testing.T) {
credentialsFile := makeCredentialsFile(fmt.Sprintf(`{"%s":{"username": "test", "password": "test"}}`,
registryName))
err := os.Chmod(credentialsFile, 0000)
err := os.Chmod(credentialsFile, 0o000)
So(err, ShouldBeNil)
defer func() {
So(os.Chmod(credentialsFile, 0755), ShouldBeNil)
So(os.Chmod(credentialsFile, 0o755), ShouldBeNil)
So(os.RemoveAll(credentialsFile), ShouldBeNil)
}()
@ -943,8 +948,10 @@ func TestSyncBasicAuth(t *testing.T) {
CertDir: "",
}
syncConfig := &sync.Config{CredentialsFile: credentialsFile,
Registries: []sync.RegistryConfig{syncRegistryConfig}}
syncConfig := &sync.Config{
CredentialsFile: credentialsFile,
Registries: []sync.RegistryConfig{syncRegistryConfig},
}
dc, destBaseURL, destDir, destClient := startDownstreamServer(false, syncConfig)
defer os.RemoveAll(destDir)
@ -985,16 +992,20 @@ func TestSyncBasicAuth(t *testing.T) {
}
// add file path to the credentials
syncConfig := &sync.Config{CredentialsFile: credentialsFile,
Registries: []sync.RegistryConfig{unreacheableSyncRegistryConfig1,
syncConfig := &sync.Config{
CredentialsFile: credentialsFile,
Registries: []sync.RegistryConfig{
unreacheableSyncRegistryConfig1,
unreacheableSyncRegistryConfig2,
syncRegistryConfig}}
syncRegistryConfig,
},
}
dc, destBaseURL, destDir, destClient := startDownstreamServer(false, syncConfig)
dctlr, destBaseURL, destDir, destClient := startDownstreamServer(false, syncConfig)
defer os.RemoveAll(destDir)
defer func() {
dc.Shutdown()
dctlr.Shutdown()
}()
var srcTagsList TagsList
@ -1021,7 +1032,7 @@ func TestSyncBasicAuth(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
err = dc.StoreController.DefaultStore.DeleteImageManifest(testImage, testImageTag)
err = dctlr.StoreController.DefaultStore.DeleteImageManifest(testImage, testImageTag)
So(err, ShouldBeNil)
resp, err = destClient.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + "1.1.1")
@ -1300,14 +1311,14 @@ func TestSyncInvalidCerts(t *testing.T) {
panic(err)
}
f, err := os.OpenFile(destFilePath, os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0600)
dstfile, err := os.OpenFile(destFilePath, os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0o600)
if err != nil {
panic(err)
}
defer f.Close()
defer dstfile.Close()
if _, err = f.WriteString("Add Invalid Text In Cert"); err != nil {
if _, err = dstfile.WriteString("Add Invalid Text In Cert"); err != nil {
panic(err)
}
@ -1358,17 +1369,17 @@ func TestSyncInvalidCerts(t *testing.T) {
}
func makeCredentialsFile(fileContent string) string {
f, err := ioutil.TempFile("", "sync-credentials-")
tmpfile, err := ioutil.TempFile("", "sync-credentials-")
if err != nil {
panic(err)
}
content := []byte(fileContent)
if err := ioutil.WriteFile(f.Name(), content, 0600); err != nil {
if err := ioutil.WriteFile(tmpfile.Name(), content, 0o600); err != nil {
panic(err)
}
return f.Name()
return tmpfile.Name()
}
func TestSyncInvalidUrl(t *testing.T) {
@ -1445,7 +1456,8 @@ func TestSyncInvalidTags(t *testing.T) {
}
syncConfig := &sync.Config{
Registries: []sync.RegistryConfig{syncRegistryConfig}}
Registries: []sync.RegistryConfig{syncRegistryConfig},
}
dc, destBaseURL, destDir, destClient := startDownstreamServer(false, syncConfig)
defer os.RemoveAll(destDir)
@ -1485,11 +1497,11 @@ func TestSyncSubPaths(t *testing.T) {
srcConfig.Storage.RootDirectory = srcDir
sc := api.NewController(srcConfig)
sctlr := api.NewController(srcConfig)
go func() {
// this blocks
if err := sc.Run(); err != nil {
if err := sctlr.Run(); err != nil {
return
}
}()
@ -1505,7 +1517,7 @@ func TestSyncSubPaths(t *testing.T) {
}
defer func() {
sc.Shutdown()
sctlr.Shutdown()
}()
regex := ".*"
@ -1530,7 +1542,8 @@ func TestSyncSubPaths(t *testing.T) {
}
syncConfig := &sync.Config{
Registries: []sync.RegistryConfig{syncRegistryConfig}}
Registries: []sync.RegistryConfig{syncRegistryConfig},
}
destPort := GetFreePort()
destConfig := config.New()
@ -1564,11 +1577,11 @@ func TestSyncSubPaths(t *testing.T) {
destConfig.Extensions.Search = nil
destConfig.Extensions.Sync = syncConfig
dc := api.NewController(destConfig)
dctlr := api.NewController(destConfig)
go func() {
// this blocks
if err := dc.Run(); err != nil {
if err := dctlr.Run(); err != nil {
return
}
}()
@ -1584,7 +1597,7 @@ func TestSyncSubPaths(t *testing.T) {
}
defer func() {
dc.Shutdown()
dctlr.Shutdown()
}()
var destTagsList TagsList
@ -1608,13 +1621,13 @@ func TestSyncSubPaths(t *testing.T) {
}
// synced image should get into subpath instead of rootDir
fi, err := os.Stat(path.Join(subPathDestDir, subpath, testImage, "blobs/sha256"))
So(fi, ShouldNotBeNil)
binfo, err := os.Stat(path.Join(subPathDestDir, subpath, testImage, "blobs/sha256"))
So(binfo, ShouldNotBeNil)
So(err, ShouldBeNil)
// check rootDir is not populated with any image.
fi, err = os.Stat(path.Join(destDir, subpath))
So(fi, ShouldBeNil)
binfo, err = os.Stat(path.Join(destDir, subpath))
So(binfo, ShouldBeNil)
So(err, ShouldNotBeNil)
})
}
@ -1636,7 +1649,7 @@ func TestSyncOnDemandContentFiltering(t *testing.T) {
syncRegistryConfig := sync.RegistryConfig{
Content: []sync.Content{
{
//should be filtered out
// should be filtered out
Prefix: "dummy",
Tags: &sync.Tags{
Regex: &regex,

View File

@ -42,28 +42,30 @@ func parseRepositoryReference(input string) (reference.Named, error) {
}
// filterRepos filters repos based on prefix given in the config.
func filterRepos(repos []string, content []Content, log log.Logger) map[int][]string {
func filterRepos(repos []string, contentList []Content, log log.Logger) map[int][]string {
filtered := make(map[int][]string)
for _, repo := range repos {
for contentID, c := range content {
for contentID, content := range contentList {
var prefix string
// handle prefixes starting with '/'
if strings.HasPrefix(c.Prefix, "/") {
prefix = c.Prefix[1:]
if strings.HasPrefix(content.Prefix, "/") {
prefix = content.Prefix[1:]
} else {
prefix = c.Prefix
prefix = content.Prefix
}
matched, err := glob.Match(prefix, repo)
if err != nil {
log.Error().Err(err).Str("pattern",
prefix).Msg("error while parsing glob pattern, skipping it...")
continue
}
if matched {
filtered[contentID] = append(filtered[contentID], repo)
break
}
}
@ -74,14 +76,14 @@ func filterRepos(repos []string, content []Content, log log.Logger) map[int][]st
// Get sync.FileCredentials from file.
func getFileCredentials(filepath string) (CredentialsFile, error) {
f, err := ioutil.ReadFile(filepath)
credsFile, err := ioutil.ReadFile(filepath)
if err != nil {
return nil, err
}
var creds CredentialsFile
err = json.Unmarshal(f, &creds)
err = json.Unmarshal(credsFile, &creds)
if err != nil {
return nil, err
}
@ -102,6 +104,7 @@ func pushSyncedLocalImage(repo, tag, uuid string,
manifestContent, _, _, err := cacheImageStore.GetImageManifest(repo, tag)
if err != nil {
log.Error().Err(err).Str("dir", path.Join(cacheImageStore.RootDir(), repo)).Msg("couldn't find index.json")
return err
}
@ -109,6 +112,7 @@ func pushSyncedLocalImage(repo, tag, uuid string,
if err := json.Unmarshal(manifestContent, &manifest); err != nil {
log.Error().Err(err).Str("dir", path.Join(cacheImageStore.RootDir(), repo)).Msg("invalid JSON")
return err
}
@ -117,12 +121,14 @@ func pushSyncedLocalImage(repo, tag, uuid string,
if err != nil {
log.Error().Err(err).Str("dir", path.Join(cacheImageStore.RootDir(),
repo)).Str("blob digest", blob.Digest.String()).Msg("couldn't read blob")
return err
}
_, _, err = imageStore.FullBlobUpload(repo, blobReader, blob.Digest.String())
if err != nil {
log.Error().Err(err).Str("blob digest", blob.Digest.String()).Msg("couldn't upload blob")
return err
}
}
@ -131,18 +137,21 @@ func pushSyncedLocalImage(repo, tag, uuid string,
if err != nil {
log.Error().Err(err).Str("dir", path.Join(cacheImageStore.RootDir(),
repo)).Str("blob digest", manifest.Config.Digest.String()).Msg("couldn't read config blob")
return err
}
_, _, err = imageStore.FullBlobUpload(repo, blobReader, manifest.Config.Digest.String())
if err != nil {
log.Error().Err(err).Str("blob digest", manifest.Config.Digest.String()).Msg("couldn't upload config blob")
return err
}
_, err = imageStore.PutImageManifest(repo, tag, ispec.MediaTypeImageManifest, manifestContent)
if err != nil {
log.Error().Err(err).Msg("couldn't upload manifest")
return err
}
@ -150,6 +159,7 @@ func pushSyncedLocalImage(repo, tag, uuid string,
if err := os.RemoveAll(path.Join(cacheImageStore.RootDir(), repo)); err != nil {
log.Error().Err(err).Msg("couldn't remove locally cached sync repo")
return err
}

View File

@ -10,6 +10,8 @@ import (
"github.com/rs/zerolog"
)
const defaultPerms = 0o0600
// Logger extends zerolog's Logger.
type Logger struct {
zerolog.Logger
@ -21,8 +23,8 @@ func (l Logger) Println(v ...interface{}) {
func NewLogger(level string, output string) Logger {
zerolog.TimeFieldFormat = time.RFC3339Nano
lvl, err := zerolog.ParseLevel(level)
lvl, err := zerolog.ParseLevel(level)
if err != nil {
panic(err)
}
@ -34,7 +36,7 @@ func NewLogger(level string, output string) Logger {
if output == "" {
log = zerolog.New(os.Stdout)
} else {
file, err := os.OpenFile(output, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
file, err := os.OpenFile(output, os.O_APPEND|os.O_WRONLY|os.O_CREATE, defaultPerms)
if err != nil {
panic(err)
}
@ -46,8 +48,8 @@ func NewLogger(level string, output string) Logger {
func NewAuditLogger(level string, audit string) *Logger {
zerolog.TimeFieldFormat = time.RFC3339Nano
lvl, err := zerolog.ParseLevel(level)
lvl, err := zerolog.ParseLevel(level)
if err != nil {
panic(err)
}
@ -56,7 +58,7 @@ func NewAuditLogger(level string, audit string) *Logger {
var auditLog zerolog.Logger
auditFile, err := os.OpenFile(audit, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
auditFile, err := os.OpenFile(audit, os.O_APPEND|os.O_WRONLY|os.O_CREATE, defaultPerms)
if err != nil {
panic(err)
}

View File

@ -32,7 +32,7 @@ const (
type AuditLog struct {
Level string `json:"level"`
ClientIP string `json:"clientIP"`
ClientIP string `json:"clientIP"` //nolint:tagliatelle // keep IP
Subject string `json:"subject"`
Action string `json:"action"`
Object string `json:"object"`
@ -71,11 +71,11 @@ func TestAuditLogMessages(t *testing.T) {
},
}
c := api.NewController(conf)
c.Config.Storage.RootDirectory = dir
ctlr := api.NewController(conf)
ctlr.Config.Storage.RootDirectory = dir
go func() {
// this blocks
if err := c.Run(); err != nil {
if err := ctlr.Run(); err != nil {
return
}
}()
@ -91,7 +91,7 @@ func TestAuditLogMessages(t *testing.T) {
defer func() {
ctx := context.Background()
_ = c.Server.Shutdown(ctx)
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Open auditLog file", func() {

View File

@ -33,27 +33,31 @@ func NewCache(rootDir string, name string, log zlog.Logger) *Cache {
Timeout: dbCacheLockCheckTimeout,
FreelistType: bbolt.FreelistArrayType,
}
db, err := bbolt.Open(dbPath, 0600, dbOpts)
cacheDB, err := bbolt.Open(dbPath, 0o600, dbOpts) //nolint:gomnd
if err != nil {
log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create cache db")
return nil
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := cacheDB.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucketIfNotExists([]byte(BlobsCache)); err != nil {
// this is a serious failure
log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create a root bucket")
return err
}
return nil
}); err != nil {
// something went wrong
log.Error().Err(err).Msg("unable to create a cache")
return nil
}
return &Cache{rootDir: rootDir, db: db, log: log}
return &Cache{rootDir: rootDir, db: cacheDB, log: log}
}
func (c *Cache) PutBlob(digest string, path string) error {
@ -75,18 +79,24 @@ func (c *Cache) PutBlob(digest string, path string) error {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
b, err := root.CreateBucketIfNotExists([]byte(digest))
bucket, err := root.CreateBucketIfNotExists([]byte(digest))
if err != nil {
// this is a serious failure
c.log.Error().Err(err).Str("bucket", digest).Msg("unable to create a bucket")
return err
}
if err := b.Put([]byte(relp), nil); err != nil {
if err := bucket.Put([]byte(relp), nil); err != nil {
c.log.Error().Err(err).Str("bucket", digest).Str("value", relp).Msg("unable to put record")
return err
}
return nil
}); err != nil {
return err
@ -104,6 +114,7 @@ func (c *Cache) GetBlob(digest string) (string, error) {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
@ -113,6 +124,7 @@ func (c *Cache) GetBlob(digest string) (string, error) {
c := b.Cursor()
k, _ := c.First()
blobPath.WriteString(string(k))
return nil
}
@ -131,6 +143,7 @@ func (c *Cache) HasBlob(digest string, blob string) bool {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
@ -138,6 +151,7 @@ func (c *Cache) HasBlob(digest string, blob string) bool {
if b == nil {
return errors.ErrCacheMiss
}
if b.Get([]byte(blob)) == nil {
return errors.ErrCacheMiss
}
@ -163,26 +177,29 @@ func (c *Cache) DeleteBlob(digest string, path string) error {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
b := root.Bucket([]byte(digest))
if b == nil {
bucket := root.Bucket([]byte(digest))
if bucket == nil {
return errors.ErrCacheMiss
}
if err := b.Delete([]byte(relp)); err != nil {
if err := bucket.Delete([]byte(relp)); err != nil {
c.log.Error().Err(err).Str("digest", digest).Str("path", relp).Msg("unable to delete")
return err
}
cur := b.Cursor()
k, _ := cur.First()
cur := bucket.Cursor()
k, _ := cur.First()
if k == nil {
c.log.Debug().Str("digest", digest).Str("path", relp).Msg("deleting empty bucket")
if err := root.DeleteBucket([]byte(digest)); err != nil {
c.log.Error().Err(err).Str("digest", digest).Str("path", relp).Msg("unable to delete")
return err
}
}

View File

@ -24,34 +24,34 @@ func TestCache(t *testing.T) {
So(storage.NewCache("/deadBEEF", "cache_test", log), ShouldBeNil)
c := storage.NewCache(dir, "cache_test", log)
So(c, ShouldNotBeNil)
cache := storage.NewCache(dir, "cache_test", log)
So(cache, ShouldNotBeNil)
v, err := c.GetBlob("key")
val, err := cache.GetBlob("key")
So(err, ShouldEqual, errors.ErrCacheMiss)
So(v, ShouldBeEmpty)
So(val, ShouldBeEmpty)
b := c.HasBlob("key", "value")
So(b, ShouldBeFalse)
exists := cache.HasBlob("key", "value")
So(exists, ShouldBeFalse)
err = c.PutBlob("key", path.Join(dir, "value"))
err = cache.PutBlob("key", path.Join(dir, "value"))
So(err, ShouldBeNil)
b = c.HasBlob("key", "value")
So(b, ShouldBeTrue)
exists = cache.HasBlob("key", "value")
So(exists, ShouldBeTrue)
v, err = c.GetBlob("key")
val, err = cache.GetBlob("key")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(val, ShouldNotBeEmpty)
err = c.DeleteBlob("bogusKey", "bogusValue")
err = cache.DeleteBlob("bogusKey", "bogusValue")
So(err, ShouldEqual, errors.ErrCacheMiss)
err = c.DeleteBlob("key", "bogusValue")
err = cache.DeleteBlob("key", "bogusValue")
So(err, ShouldBeNil)
// try to insert empty path
err = c.PutBlob("key", "")
err = cache.PutBlob("key", "")
So(err, ShouldNotBeNil)
So(err, ShouldEqual, errors.ErrEmptyValue)
})

View File

@ -11,28 +11,22 @@ import (
"os"
"path"
"strings"
"testing"
"time"
godigest "github.com/opencontainers/go-digest"
//"strings"
"testing"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
guuid "github.com/gofrs/uuid"
godigest "github.com/opencontainers/go-digest"
"github.com/rs/zerolog"
. "github.com/smartystreets/goconvey/convey"
"gopkg.in/resty.v1"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/s3"
// Add s3 support
storageDriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
"gopkg.in/resty.v1"
)
// nolint: gochecknoglobals
@ -44,17 +38,19 @@ var (
errS3 = errors.New(errorText)
)
func cleanupStorage(store storageDriver.StorageDriver, name string) {
func cleanupStorage(store driver.StorageDriver, name string) {
_ = store.Delete(context.Background(), name)
}
func skipIt(t *testing.T) {
t.Helper()
if os.Getenv("S3MOCK_ENDPOINT") == "" {
t.Skip("Skipping testing without AWS S3 mock server")
}
}
func createMockStorage(rootDir string, store storageDriver.StorageDriver) storage.ImageStore {
func createMockStorage(rootDir string, store driver.StorageDriver) storage.ImageStore {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := s3.NewImageStore(rootDir, false, false, log, metrics, store)
@ -62,7 +58,7 @@ func createMockStorage(rootDir string, store storageDriver.StorageDriver) storag
return il
}
func createObjectsStore(rootDir string) (storageDriver.StorageDriver, storage.ImageStore, error) {
func createObjectsStore(rootDir string) (driver.StorageDriver, storage.ImageStore, error) {
bucket := "zot-storage-test"
endpoint := os.Getenv("S3MOCK_ENDPOINT")
storageDriverParams := map[string]interface{}{
@ -167,12 +163,12 @@ type StorageDriverMock struct {
getContentFn func(ctx context.Context, path string) ([]byte, error)
putContentFn func(ctx context.Context, path string, content []byte) error
readerFn func(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
writerFn func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error)
statFn func(ctx context.Context, path string) (storageDriver.FileInfo, error)
writerFn func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error)
statFn func(ctx context.Context, path string) (driver.FileInfo, error)
listFn func(ctx context.Context, path string) ([]string, error)
moveFn func(ctx context.Context, sourcePath string, destPath string) error
deleteFn func(ctx context.Context, path string) error
walkFn func(ctx context.Context, path string, f storageDriver.WalkFn) error
walkFn func(ctx context.Context, path string, f driver.WalkFn) error
}
func (s *StorageDriverMock) Name() string {
@ -207,15 +203,15 @@ func (s *StorageDriverMock) Reader(ctx context.Context, path string, offset int6
return ioutil.NopCloser(strings.NewReader("")), nil
}
func (s *StorageDriverMock) Writer(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
func (s *StorageDriverMock) Writer(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
if s != nil && s.writerFn != nil {
return s.writerFn(ctx, path, append)
return s.writerFn(ctx, path, isAppend)
}
return &FileWriterMock{}, nil
}
func (s *StorageDriverMock) Stat(ctx context.Context, path string) (storageDriver.FileInfo, error) {
func (s *StorageDriverMock) Stat(ctx context.Context, path string) (driver.FileInfo, error) {
if s != nil && s.statFn != nil {
return s.statFn(ctx, path)
}
@ -251,7 +247,7 @@ func (s *StorageDriverMock) URLFor(ctx context.Context, path string, options map
return "", nil
}
func (s *StorageDriverMock) Walk(ctx context.Context, path string, f storageDriver.WalkFn) error {
func (s *StorageDriverMock) Walk(ctx context.Context, path string, f driver.WalkFn) error {
if s != nil && s.walkFn != nil {
return s.walkFn(ctx, path, f)
}
@ -269,117 +265,117 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
testDir := path.Join("/oci-repo-test", uuid.String())
store, il, _ := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
storeDriver, imgStore, _ := createObjectsStore(testDir)
defer cleanupStorage(storeDriver, testDir)
Convey("Invalid validate repo", t, func(c C) {
So(il, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
objects, err := store.List(context.Background(), path.Join(il.RootDir(), testImage))
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo(testImage), ShouldBeNil)
objects, err := storeDriver.List(context.Background(), path.Join(imgStore.RootDir(), testImage))
So(err, ShouldBeNil)
for _, object := range objects {
t.Logf("Removing object: %s", object)
err := store.Delete(context.Background(), object)
err := storeDriver.Delete(context.Background(), object)
So(err, ShouldBeNil)
}
_, err = il.ValidateRepo(testImage)
_, err = imgStore.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
_, err = il.GetRepositories()
_, err = imgStore.GetRepositories()
So(err, ShouldBeNil)
})
Convey("Invalid get image tags", t, func(c C) {
store, il, err := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
storeDriver, imgStore, err := createObjectsStore(testDir)
defer cleanupStorage(storeDriver, testDir)
So(err, ShouldBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(imgStore.InitRepo(testImage), ShouldBeNil)
So(store.Move(context.Background(), path.Join(testDir, testImage, "index.json"),
So(storeDriver.Move(context.Background(), path.Join(testDir, testImage, "index.json"),
path.Join(testDir, testImage, "blobs")), ShouldBeNil)
ok, _ := il.ValidateRepo(testImage)
ok, _ := imgStore.ValidateRepo(testImage)
So(ok, ShouldBeFalse)
_, err = il.GetImageTags(testImage)
_, err = imgStore.GetImageTags(testImage)
So(err, ShouldNotBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(storeDriver.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.PutContent(context.Background(), path.Join(testDir, testImage, "index.json"), []byte{}), ShouldBeNil)
_, err = il.GetImageTags(testImage)
So(imgStore.InitRepo(testImage), ShouldBeNil)
So(storeDriver.PutContent(context.Background(), path.Join(testDir, testImage, "index.json"), []byte{}), ShouldBeNil)
_, err = imgStore.GetImageTags(testImage)
So(err, ShouldNotBeNil)
})
Convey("Invalid get image manifest", t, func(c C) {
store, il, err := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
storeDriver, imgStore, err := createObjectsStore(testDir)
defer cleanupStorage(storeDriver, testDir)
So(err, ShouldBeNil)
So(il, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage, "index.json")), ShouldBeNil)
_, _, _, err = il.GetImageManifest(testImage, "")
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo(testImage), ShouldBeNil)
So(storeDriver.Delete(context.Background(), path.Join(testDir, testImage, "index.json")), ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest(testImage, "")
So(err, ShouldNotBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.PutContent(context.Background(), path.Join(testDir, testImage, "index.json"), []byte{}), ShouldBeNil)
_, _, _, err = il.GetImageManifest(testImage, "")
So(storeDriver.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(imgStore.InitRepo(testImage), ShouldBeNil)
So(storeDriver.PutContent(context.Background(), path.Join(testDir, testImage, "index.json"), []byte{}), ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest(testImage, "")
So(err, ShouldNotBeNil)
})
Convey("Invalid validate repo", t, func(c C) {
store, il, err := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
storeDriver, imgStore, err := createObjectsStore(testDir)
defer cleanupStorage(storeDriver, testDir)
So(err, ShouldBeNil)
So(il, ShouldNotBeNil)
So(imgStore, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage, "index.json")), ShouldBeNil)
_, err = il.ValidateRepo(testImage)
So(imgStore.InitRepo(testImage), ShouldBeNil)
So(storeDriver.Delete(context.Background(), path.Join(testDir, testImage, "index.json")), ShouldBeNil)
_, err = imgStore.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.Move(context.Background(), path.Join(testDir, testImage, "index.json"),
So(storeDriver.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(imgStore.InitRepo(testImage), ShouldBeNil)
So(storeDriver.Move(context.Background(), path.Join(testDir, testImage, "index.json"),
path.Join(testDir, testImage, "_index.json")), ShouldBeNil)
ok, err := il.ValidateRepo(testImage)
ok, err := imgStore.ValidateRepo(testImage)
So(err, ShouldBeNil)
So(ok, ShouldBeFalse)
})
Convey("Invalid finish blob upload", t, func(c C) {
store, il, err := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
storeDriver, imgStore, err := createObjectsStore(testDir)
defer cleanupStorage(storeDriver, testDir)
So(err, ShouldBeNil)
So(il, ShouldNotBeNil)
So(imgStore, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
v, err := il.NewBlobUpload(testImage)
So(imgStore.InitRepo(testImage), ShouldBeNil)
upload, err := imgStore.NewBlobUpload(testImage)
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
content := []byte("test-data1")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
buflen := buf.Len()
digest := godigest.FromBytes(content)
b, err := il.PutBlobChunk(testImage, v, 0, int64(l), buf)
blob, err := imgStore.PutBlobChunk(testImage, upload, 0, int64(buflen), buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
src := il.BlobUploadPath(testImage, v)
fw, err := store.Writer(context.Background(), src, true)
src := imgStore.BlobUploadPath(testImage, upload)
stwr, err := storeDriver.Writer(context.Background(), src, true)
So(err, ShouldBeNil)
_, err = fw.Write([]byte("another-chunk-of-data"))
_, err = stwr.Write([]byte("another-chunk-of-data"))
So(err, ShouldBeNil)
err = fw.Close()
err = stwr.Close()
So(err, ShouldBeNil)
err = il.FinishBlobUpload(testImage, v, buf, d.String())
err = imgStore.FinishBlobUpload(testImage, upload, buf, digest.String())
So(err, ShouldNotBeNil)
})
Convey("Test storage driver errors", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{testImage}, errS3
},
@ -392,202 +388,202 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
putContentFn: func(ctx context.Context, path string, content []byte) error {
return errS3
},
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{}, errS3
},
readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("")), errS3
},
walkFn: func(ctx context.Context, path string, f storageDriver.WalkFn) error {
walkFn: func(ctx context.Context, path string, f driver.WalkFn) error {
return errS3
},
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
statFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
return &FileInfoMock{}, errS3
},
deleteFn: func(ctx context.Context, path string) error {
return errS3
},
})
So(il, ShouldNotBeNil)
So(imgStore, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldNotBeNil)
_, err := il.ValidateRepo(testImage)
So(imgStore.InitRepo(testImage), ShouldNotBeNil)
_, err := imgStore.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
v, err := il.NewBlobUpload(testImage)
upload, err := imgStore.NewBlobUpload(testImage)
So(err, ShouldNotBeNil)
content := []byte("test-data1")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
buflen := buf.Len()
digest := godigest.FromBytes(content)
_, err = il.PutBlobChunk(testImage, v, 0, int64(l), buf)
_, err = imgStore.PutBlobChunk(testImage, upload, 0, int64(buflen), buf)
So(err, ShouldNotBeNil)
err = il.FinishBlobUpload(testImage, v, buf, d.String())
err = imgStore.FinishBlobUpload(testImage, upload, buf, digest.String())
So(err, ShouldNotBeNil)
err = il.DeleteBlob(testImage, d.String())
err = imgStore.DeleteBlob(testImage, digest.String())
So(err, ShouldNotBeNil)
err = il.DeleteBlobUpload(testImage, v)
err = imgStore.DeleteBlobUpload(testImage, upload)
So(err, ShouldNotBeNil)
err = il.DeleteImageManifest(testImage, "1.0")
err = imgStore.DeleteImageManifest(testImage, "1.0")
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest(testImage, "1.0", "application/json", []byte{})
_, err = imgStore.PutImageManifest(testImage, "1.0", "application/json", []byte{})
So(err, ShouldNotBeNil)
_, err = il.PutBlobChunkStreamed(testImage, v, bytes.NewBuffer([]byte(testImage)))
_, err = imgStore.PutBlobChunkStreamed(testImage, upload, bytes.NewBuffer([]byte(testImage)))
So(err, ShouldNotBeNil)
_, _, err = il.FullBlobUpload(testImage, bytes.NewBuffer([]byte{}), "inexistent")
_, _, err = imgStore.FullBlobUpload(testImage, bytes.NewBuffer([]byte{}), "inexistent")
So(err, ShouldNotBeNil)
_, _, err = il.CheckBlob(testImage, d.String())
_, _, err = imgStore.CheckBlob(testImage, digest.String())
So(err, ShouldNotBeNil)
})
Convey("Test ValidateRepo", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{testImage, testImage}, errS3
},
})
_, err := il.ValidateRepo(testImage)
_, err := imgStore.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test ValidateRepo2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{"test/test/oci-layout", "test/test/index.json"}, nil
},
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
statFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
return &FileInfoMock{}, nil
},
})
_, err := il.ValidateRepo(testImage)
_, err := imgStore.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test ValidateRepo3", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{"test/test/oci-layout", "test/test/index.json"}, nil
},
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
statFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
return &FileInfoMock{}, nil
},
getContentFn: func(ctx context.Context, path string) ([]byte, error) {
return []byte{}, errS3
},
})
_, err := il.ValidateRepo(testImage)
_, err := imgStore.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test ValidateRepo4", t, func(c C) {
ociLayout := []byte(`{"imageLayoutVersion": "9.9.9"}`)
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{"test/test/oci-layout", "test/test/index.json"}, nil
},
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
statFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
return &FileInfoMock{}, nil
},
getContentFn: func(ctx context.Context, path string) ([]byte, error) {
return ociLayout, nil
},
})
_, err := il.ValidateRepo(testImage)
_, err := imgStore.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test GetRepositories", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
walkFn: func(ctx context.Context, path string, f storageDriver.WalkFn) error {
imgStore = createMockStorage(testDir, &StorageDriverMock{
walkFn: func(ctx context.Context, path string, f driver.WalkFn) error {
return f(new(FileInfoMock))
},
})
repos, err := il.GetRepositories()
repos, err := imgStore.GetRepositories()
So(repos, ShouldBeEmpty)
So(err, ShouldBeNil)
})
Convey("Test DeleteImageManifest", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
getContentFn: func(ctx context.Context, path string) ([]byte, error) {
return []byte{}, errS3
},
})
err := il.DeleteImageManifest(testImage, "1.0")
err := imgStore.DeleteImageManifest(testImage, "1.0")
So(err, ShouldNotBeNil)
})
Convey("Test DeleteImageManifest2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{})
err := il.DeleteImageManifest(testImage, "1.0")
imgStore = createMockStorage(testDir, &StorageDriverMock{})
err := imgStore.DeleteImageManifest(testImage, "1.0")
So(err, ShouldNotBeNil)
})
Convey("Test NewBlobUpload", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
putContentFn: func(ctx context.Context, path string, content []byte) error {
return errS3
},
})
_, err := il.NewBlobUpload(testImage)
_, err := imgStore.NewBlobUpload(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test GetBlobUpload", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
statFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
return &FileInfoMock{}, errS3
},
})
_, err := il.GetBlobUpload(testImage, "uuid")
_, err := imgStore.GetBlobUpload(testImage, "uuid")
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunkStreamed", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{}, errS3
},
})
_, err := il.PutBlobChunkStreamed(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")))
_, err := imgStore.PutBlobChunkStreamed(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunkStreamed2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{writeFn: func(b []byte) (int, error) {
return 0, errS3
}}, nil
},
})
_, err := il.PutBlobChunkStreamed(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")))
_, err := imgStore.PutBlobChunkStreamed(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunk", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{}, errS3
},
})
_, err := il.PutBlobChunk(testImage, "uuid", 0, 100, ioutil.NopCloser(strings.NewReader("")))
_, err := imgStore.PutBlobChunk(testImage, "uuid", 0, 100, ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunk2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{
writeFn: func(b []byte) (int, error) {
return 0, errS3
@ -598,13 +594,13 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
}, nil
},
})
_, err := il.PutBlobChunk(testImage, "uuid", 0, 100, ioutil.NopCloser(strings.NewReader("")))
_, err := imgStore.PutBlobChunk(testImage, "uuid", 0, 100, ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunk3", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{
writeFn: func(b []byte) (int, error) {
return 0, errS3
@ -612,13 +608,13 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
}, nil
},
})
_, err := il.PutBlobChunk(testImage, "uuid", 12, 100, ioutil.NopCloser(strings.NewReader("")))
_, err := imgStore.PutBlobChunk(testImage, "uuid", 12, 100, ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test FinishBlobUpload", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{
commitFn: func() error {
return errS3
@ -627,13 +623,13 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
},
})
d := godigest.FromBytes([]byte("test"))
err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
err := imgStore.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FinishBlobUpload2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{
closeFn: func() error {
return errS3
@ -642,91 +638,91 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
},
})
d := godigest.FromBytes([]byte("test"))
err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
err := imgStore.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FinishBlobUpload3", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
return nil, errS3
},
})
d := godigest.FromBytes([]byte("test"))
err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
err := imgStore.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FinishBlobUpload4", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
moveFn: func(ctx context.Context, sourcePath, destPath string) error {
return errS3
},
})
d := godigest.FromBytes([]byte(""))
err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
err := imgStore.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FullBlobUpload", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
imgStore = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{}, errS3
},
})
d := godigest.FromBytes([]byte(""))
_, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
_, _, err := imgStore.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FullBlobUpload2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{})
imgStore = createMockStorage(testDir, &StorageDriverMock{})
d := godigest.FromBytes([]byte(" "))
_, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
_, _, err := imgStore.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FullBlobUpload3", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
moveFn: func(ctx context.Context, sourcePath, destPath string) error {
return errS3
},
})
d := godigest.FromBytes([]byte(""))
_, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
_, _, err := imgStore.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test GetBlob", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("")), errS3
},
})
d := godigest.FromBytes([]byte(""))
_, _, err := il.GetBlob(testImage, d.String(), "")
_, _, err := imgStore.GetBlob(testImage, d.String(), "")
So(err, ShouldNotBeNil)
})
Convey("Test DeleteBlob", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
deleteFn: func(ctx context.Context, path string) error {
return errS3
},
})
d := godigest.FromBytes([]byte(""))
err := il.DeleteBlob(testImage, d.String())
err := imgStore.DeleteBlob(testImage, d.String())
So(err, ShouldNotBeNil)
})
Convey("Test GetReferrers", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
imgStore = createMockStorage(testDir, &StorageDriverMock{
deleteFn: func(ctx context.Context, path string) error {
return errS3
},
})
d := godigest.FromBytes([]byte(""))
_, err := il.GetReferrers(testImage, d.String(), "application/image")
_, err := imgStore.GetReferrers(testImage, d.String(), "application/image")
So(err, ShouldNotBeNil)
So(err, ShouldEqual, zerr.ErrMethodNotSupported)
})

View File

@ -5,6 +5,7 @@ import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"path"
@ -12,25 +13,26 @@ import (
"strings"
"sync"
// Add s3 support.
"github.com/docker/distribution/registry/storage/driver"
// Load s3 driver.
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
guuid "github.com/gofrs/uuid"
"github.com/notaryproject/notation-go-lib"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/rs/zerolog"
"zotregistry.io/zot/errors"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/extensions/monitoring"
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
// Add s3 support
storageDriver "github.com/docker/distribution/registry/storage/driver"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws" // Load s3 driver
)
// ObjectStorage provides the image storage operations.
type ObjectStorage struct {
rootDir string
store storageDriver.StorageDriver
store driver.StorageDriver
lock *sync.RWMutex
blobUploads map[string]storage.BlobUpload
log zerolog.Logger
@ -55,19 +57,19 @@ func (is *ObjectStorage) DirExists(d string) bool {
// NewObjectStorage returns a new image store backed by cloud storages.
// see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger, m monitoring.MetricServer,
store storageDriver.StorageDriver) storage.ImageStore {
is := &ObjectStorage{
func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger, metrics monitoring.MetricServer,
store driver.StorageDriver) storage.ImageStore {
imgStore := &ObjectStorage{
rootDir: rootDir,
store: store,
lock: &sync.RWMutex{},
blobUploads: make(map[string]storage.BlobUpload),
log: log.With().Caller().Logger(),
isMultiPartUpload: make(map[string]bool),
metrics: m,
metrics: metrics,
}
return is
return imgStore
}
// RLock read-lock.
@ -101,15 +103,17 @@ func (is *ObjectStorage) initRepo(name string) error {
ilPath := path.Join(repoDir, ispec.ImageLayoutFile)
if _, err := is.store.Stat(context.Background(), ilPath); err != nil {
il := ispec.ImageLayout{Version: ispec.ImageLayoutVersion}
buf, err := json.Marshal(il)
buf, err := json.Marshal(il)
if err != nil {
is.log.Error().Err(err).Msg("unable to marshal JSON")
return err
}
if _, err := writeFile(is.store, ilPath, buf); err != nil {
is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file")
return err
}
}
@ -119,15 +123,17 @@ func (is *ObjectStorage) initRepo(name string) error {
if _, err := is.store.Stat(context.Background(), indexPath); err != nil {
index := ispec.Index{}
index.SchemaVersion = 2
buf, err := json.Marshal(index)
buf, err := json.Marshal(index)
if err != nil {
is.log.Error().Err(err).Msg("unable to marshal JSON")
return err
}
if _, err := writeFile(is.store, indexPath, buf); err != nil {
is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file")
return err
}
}
@ -151,18 +157,19 @@ func (is *ObjectStorage) ValidateRepo(name string) (bool, error) {
// for objects storage we can not create empty dirs, so we check only against index.json and oci-layout
dir := path.Join(is.rootDir, name)
if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
return false, errors.ErrRepoNotFound
return false, zerr.ErrRepoNotFound
}
files, err := is.store.List(context.Background(), dir)
if err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("unable to read directory")
return false, errors.ErrRepoNotFound
return false, zerr.ErrRepoNotFound
}
// nolint:gomnd
if len(files) < 2 {
return false, errors.ErrRepoBadVersion
return false, zerr.ErrRepoBadVersion
}
found := map[string]bool{
@ -205,7 +212,7 @@ func (is *ObjectStorage) ValidateRepo(name string) (bool, error) {
}
if il.Version != ispec.ImageLayoutVersion {
return false, errors.ErrRepoBadVersion
return false, zerr.ErrRepoBadVersion
}
return true, nil
@ -219,18 +226,18 @@ func (is *ObjectStorage) GetRepositories() ([]string, error) {
defer is.RUnlock()
stores := make([]string, 0)
err := is.store.Walk(context.Background(), dir, func(fileInfo storageDriver.FileInfo) error {
err := is.store.Walk(context.Background(), dir, func(fileInfo driver.FileInfo) error {
if !fileInfo.IsDir() {
return nil
}
rel, err := filepath.Rel(is.rootDir, fileInfo.Path())
if err != nil {
return nil
return nil //nolint:nilerr // ignore paths that are not under root dir
}
if ok, err := is.ValidateRepo(rel); !ok || err != nil {
return nil
return nil //nolint:nilerr // ignore invalid repos
}
stores = append(stores, rel)
@ -239,8 +246,8 @@ func (is *ObjectStorage) GetRepositories() ([]string, error) {
})
// if the root directory is not yet created then return an empty slice of repositories
_, ok := err.(storageDriver.PathNotFoundError)
if ok {
var perr driver.PathNotFoundError
if errors.As(err, &perr) {
return stores, nil
}
@ -251,7 +258,7 @@ func (is *ObjectStorage) GetRepositories() ([]string, error) {
func (is *ObjectStorage) GetImageTags(repo string) ([]string, error) {
dir := path.Join(is.rootDir, repo)
if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
return nil, errors.ErrRepoNotFound
return nil, zerr.ErrRepoNotFound
}
is.RLock()
@ -265,7 +272,8 @@ func (is *ObjectStorage) GetImageTags(repo string) ([]string, error) {
var index ispec.Index
if err := json.Unmarshal(buf, &index); err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
return nil, errors.ErrRepoNotFound
return nil, zerr.ErrRepoNotFound
}
tags := make([]string, 0)
@ -284,7 +292,7 @@ func (is *ObjectStorage) GetImageTags(repo string) ([]string, error) {
func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte, string, string, error) {
dir := path.Join(is.rootDir, repo)
if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
return nil, "", "", errors.ErrRepoNotFound
return nil, "", "", zerr.ErrRepoNotFound
}
is.RLock()
@ -298,6 +306,7 @@ func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte
var index ispec.Index
if err := json.Unmarshal(buf, &index); err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
return nil, "", "", err
}
@ -307,19 +316,19 @@ func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte
mediaType := ""
for _, m := range index.Manifests {
if reference == m.Digest.String() {
digest = m.Digest
mediaType = m.MediaType
for _, manifest := range index.Manifests {
if reference == manifest.Digest.String() {
digest = manifest.Digest
mediaType = manifest.MediaType
found = true
break
}
v, ok := m.Annotations[ispec.AnnotationRefName]
v, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok && v == reference {
digest = m.Digest
mediaType = m.MediaType
digest = manifest.Digest
mediaType = manifest.MediaType
found = true
break
@ -327,7 +336,7 @@ func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte
}
if !found {
return nil, "", "", errors.ErrManifestNotFound
return nil, "", "", zerr.ErrManifestNotFound
}
p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
@ -335,12 +344,14 @@ func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte
buf, err = is.store.GetContent(context.Background(), p)
if err != nil {
is.log.Error().Err(err).Str("blob", p).Msg("failed to read manifest")
return nil, "", "", err
}
var manifest ispec.Manifest
if err := json.Unmarshal(buf, &manifest); err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
return nil, "", "", err
}
@ -354,29 +365,34 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
body []byte) (string, error) {
if err := is.InitRepo(repo); err != nil {
is.log.Debug().Err(err).Msg("init repo")
return "", err
}
if mediaType != ispec.MediaTypeImageManifest {
is.log.Debug().Interface("actual", mediaType).
Interface("expected", ispec.MediaTypeImageManifest).Msg("bad manifest media type")
return "", errors.ErrBadManifest
return "", zerr.ErrBadManifest
}
if len(body) == 0 {
is.log.Debug().Int("len", len(body)).Msg("invalid body length")
return "", errors.ErrBadManifest
return "", zerr.ErrBadManifest
}
var m ispec.Manifest
if err := json.Unmarshal(body, &m); err != nil {
is.log.Error().Err(err).Msg("unable to unmarshal JSON")
return "", errors.ErrBadManifest
return "", zerr.ErrBadManifest
}
if m.SchemaVersion != storage.SchemaVersion {
is.log.Error().Int("SchemaVersion", m.SchemaVersion).Msg("invalid manifest")
return "", errors.ErrBadManifest
return "", zerr.ErrBadManifest
}
for _, l := range m.Layers {
@ -386,19 +402,21 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
if _, err := is.store.Stat(context.Background(), blobPath); err != nil {
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to find blob")
return digest.String(), errors.ErrBlobNotFound
return digest.String(), zerr.ErrBlobNotFound
}
}
mDigest := godigest.FromBytes(body)
refIsDigest := false
d, err := godigest.Parse(reference)
dgst, err := godigest.Parse(reference)
if err == nil {
if d.String() != mDigest.String() {
is.log.Error().Str("actual", mDigest.String()).Str("expected", d.String()).
if dgst.String() != mDigest.String() {
is.log.Error().Str("actual", mDigest.String()).Str("expected", dgst.String()).
Msg("manifest digest is not valid")
return "", errors.ErrBadManifest
return "", zerr.ErrBadManifest
}
refIsDigest = true
@ -417,31 +435,34 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
var index ispec.Index
if err := json.Unmarshal(buf, &index); err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
return "", errors.ErrRepoBadVersion
return "", zerr.ErrRepoBadVersion
}
updateIndex := true
// create a new descriptor
desc := ispec.Descriptor{MediaType: mediaType, Size: int64(len(body)), Digest: mDigest,
Platform: &ispec.Platform{Architecture: "amd64", OS: "linux"}}
desc := ispec.Descriptor{
MediaType: mediaType, Size: int64(len(body)), Digest: mDigest,
Platform: &ispec.Platform{Architecture: "amd64", OS: "linux"},
}
if !refIsDigest {
desc.Annotations = map[string]string{ispec.AnnotationRefName: reference}
}
for i, m := range index.Manifests {
if reference == m.Digest.String() {
for midx, manifest := range index.Manifests {
if reference == manifest.Digest.String() {
// nothing changed, so don't update
desc = m
desc = manifest
updateIndex = false
break
}
v, ok := m.Annotations[ispec.AnnotationRefName]
v, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok && v == reference {
if m.Digest.String() == mDigest.String() {
if manifest.Digest.String() == mDigest.String() {
// nothing changed, so don't update
desc = m
desc = manifest
updateIndex = false
break
@ -455,11 +476,11 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
Str("new digest", mDigest.String()).
Msg("updating existing tag with new manifest contents")
desc = m
desc = manifest
desc.Size = int64(len(body))
desc.Digest = mDigest
index.Manifests = append(index.Manifests[:i], index.Manifests[i+1:]...)
index.Manifests = append(index.Manifests[:midx], index.Manifests[midx+1:]...)
break
}
@ -475,6 +496,7 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
if err = is.store.PutContent(context.Background(), manifestPath, body); err != nil {
is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write")
return "", err
}
@ -486,11 +508,13 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
if err != nil {
is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON")
return "", err
}
if err = is.store.PutContent(context.Background(), indexPath, buf); err != nil {
is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write")
return "", err
}
@ -504,13 +528,13 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
func (is *ObjectStorage) DeleteImageManifest(repo string, reference string) error {
dir := path.Join(is.rootDir, repo)
if fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {
return errors.ErrRepoNotFound
return zerr.ErrRepoNotFound
}
isTag := false
// as per spec "reference" can only be a digest and not a tag
digest, err := godigest.Parse(reference)
dgst, err := godigest.Parse(reference)
if err != nil {
is.log.Debug().Str("invalid digest: ", reference).Msg("storage: assuming tag")
@ -528,40 +552,42 @@ func (is *ObjectStorage) DeleteImageManifest(repo string, reference string) erro
var index ispec.Index
if err := json.Unmarshal(buf, &index); err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("invalid JSON")
return err
}
found := false
var m ispec.Descriptor
var manifest ispec.Descriptor
// we are deleting, so keep only those manifests that don't match
outIndex := index
outIndex.Manifests = []ispec.Descriptor{}
for _, m = range index.Manifests {
for _, manifest = range index.Manifests {
if isTag {
tag, ok := m.Annotations[ispec.AnnotationRefName]
tag, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok && tag == reference {
is.log.Debug().Str("deleting tag", tag).Msg("")
digest = m.Digest
dgst = manifest.Digest
found = true
continue
}
} else if reference == m.Digest.String() {
} else if reference == manifest.Digest.String() {
is.log.Debug().Str("deleting reference", reference).Msg("")
found = true
continue
}
outIndex.Manifests = append(outIndex.Manifests, m)
outIndex.Manifests = append(outIndex.Manifests, manifest)
}
if !found {
return errors.ErrManifestNotFound
return zerr.ErrManifestNotFound
}
// now update "index.json"
@ -575,6 +601,7 @@ func (is *ObjectStorage) DeleteImageManifest(repo string, reference string) erro
if _, err := writeFile(is.store, file, buf); err != nil {
is.log.Debug().Str("deleting reference", reference).Msg("")
return err
}
@ -582,15 +609,16 @@ func (is *ObjectStorage) DeleteImageManifest(repo string, reference string) erro
// e.g. 1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.
toDelete := true
for _, m = range outIndex.Manifests {
if digest.String() == m.Digest.String() {
for _, manifest = range outIndex.Manifests {
if dgst.String() == manifest.Digest.String() {
toDelete = false
break
}
}
if toDelete {
p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
p := path.Join(dir, "blobs", dgst.Algorithm().String(), dgst.Encoded())
err = is.store.Delete(context.Background(), p)
if err != nil {
@ -624,18 +652,18 @@ func (is *ObjectStorage) NewBlobUpload(repo string) (string, error) {
return "", err
}
u := uuid.String()
uid := uuid.String()
blobUploadPath := is.BlobUploadPath(repo, u)
blobUploadPath := is.BlobUploadPath(repo, uid)
// here we should create an empty multi part upload, but that's not possible
// so we just create a regular empty file which will be overwritten by FinishBlobUpload
err = is.store.PutContent(context.Background(), blobUploadPath, []byte{})
if err != nil {
return "", errors.ErrRepoNotFound
return "", zerr.ErrRepoNotFound
}
return u, nil
return uid, nil
}
// GetBlobUpload returns the current size of a blob upload.
@ -648,17 +676,17 @@ func (is *ObjectStorage) GetBlobUpload(repo string, uuid string) (int64, error)
// created by NewBlobUpload, it should have 0 size every time
isMultiPartStarted, ok := is.isMultiPartUpload[blobUploadPath]
if !isMultiPartStarted || !ok {
fi, err := is.store.Stat(context.Background(), blobUploadPath)
binfo, err := is.store.Stat(context.Background(), blobUploadPath)
if err != nil {
_, ok := err.(storageDriver.PathNotFoundError)
if ok {
return -1, errors.ErrUploadNotFound
var perr driver.PathNotFoundError
if errors.As(err, &perr) {
return -1, zerr.ErrUploadNotFound
}
return -1, err
}
fileSize = fi.Size()
fileSize = binfo.Size()
} else {
// otherwise get the size of multi parts upload
fi, err := getMultipartFileWriter(is, blobUploadPath)
@ -683,12 +711,13 @@ func (is *ObjectStorage) PutBlobChunkStreamed(repo string, uuid string, body io.
_, err := is.store.Stat(context.Background(), blobUploadPath)
if err != nil {
return -1, errors.ErrUploadNotFound
return -1, zerr.ErrUploadNotFound
}
file, err := getMultipartFileWriter(is, blobUploadPath)
if err != nil {
is.log.Error().Err(err).Msg("failed to create multipart upload")
return -1, err
}
@ -699,16 +728,18 @@ func (is *ObjectStorage) PutBlobChunkStreamed(repo string, uuid string, body io.
_, err = buf.ReadFrom(body)
if err != nil {
is.log.Error().Err(err).Msg("failed to read blob")
return -1, err
}
n, err := file.Write(buf.Bytes())
nbytes, err := file.Write(buf.Bytes())
if err != nil {
is.log.Error().Err(err).Msg("failed to append to file")
return -1, err
}
return int64(n), err
return int64(nbytes), err
}
// PutBlobChunk writes another chunk of data to the specified blob. It returns
@ -723,12 +754,13 @@ func (is *ObjectStorage) PutBlobChunk(repo string, uuid string, from int64, to i
_, err := is.store.Stat(context.Background(), blobUploadPath)
if err != nil {
return -1, errors.ErrUploadNotFound
return -1, zerr.ErrUploadNotFound
}
file, err := getMultipartFileWriter(is, blobUploadPath)
if err != nil {
is.log.Error().Err(err).Msg("failed to create multipart upload")
return -1, err
}
@ -739,13 +771,14 @@ func (is *ObjectStorage) PutBlobChunk(repo string, uuid string, from int64, to i
err := file.Cancel()
if err != nil {
is.log.Error().Err(err).Msg("failed to cancel multipart upload")
return -1, err
}
is.log.Error().Int64("expected", from).Int64("actual", file.Size()).
Msg("invalid range start for blob upload")
return -1, errors.ErrBadUploadRange
return -1, zerr.ErrBadUploadRange
}
buf := new(bytes.Buffer)
@ -753,18 +786,20 @@ func (is *ObjectStorage) PutBlobChunk(repo string, uuid string, from int64, to i
_, err = buf.ReadFrom(body)
if err != nil {
is.log.Error().Err(err).Msg("failed to read blob")
return -1, err
}
n, err := file.Write(buf.Bytes())
nbytes, err := file.Write(buf.Bytes())
if err != nil {
is.log.Error().Err(err).Msg("failed to append to file")
return -1, err
}
is.isMultiPartUpload[blobUploadPath] = true
return int64(n), err
return int64(nbytes), err
}
// BlobUploadInfo returns the current blob size in bytes.
@ -777,22 +812,24 @@ func (is *ObjectStorage) BlobUploadInfo(repo string, uuid string) (int64, error)
// created by NewBlobUpload, it should have 0 size every time
isMultiPartStarted, ok := is.isMultiPartUpload[blobUploadPath]
if !isMultiPartStarted || !ok {
fi, err := is.store.Stat(context.Background(), blobUploadPath)
uploadInfo, err := is.store.Stat(context.Background(), blobUploadPath)
if err != nil {
is.log.Error().Err(err).Str("blob", blobUploadPath).Msg("failed to stat blob")
return -1, err
}
fileSize = fi.Size()
fileSize = uploadInfo.Size()
} else {
// otherwise get the size of multi parts upload
fi, err := getMultipartFileWriter(is, blobUploadPath)
binfo, err := getMultipartFileWriter(is, blobUploadPath)
if err != nil {
is.log.Error().Err(err).Str("blob", blobUploadPath).Msg("failed to stat blob")
return -1, err
}
fileSize = fi.Size()
fileSize = binfo.Size()
}
return fileSize, nil
@ -803,7 +840,8 @@ func (is *ObjectStorage) FinishBlobUpload(repo string, uuid string, body io.Read
dstDigest, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
return errors.ErrBadBlobDigest
return zerr.ErrBadBlobDigest
}
src := is.BlobUploadPath(repo, uuid)
@ -812,11 +850,13 @@ func (is *ObjectStorage) FinishBlobUpload(repo string, uuid string, body io.Read
fileWriter, err := is.store.Writer(context.Background(), src, true)
if err != nil {
is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
return errors.ErrBadBlobDigest
return zerr.ErrBadBlobDigest
}
if err := fileWriter.Commit(); err != nil {
is.log.Error().Err(err).Msg("failed to commit file")
return err
}
@ -827,19 +867,22 @@ func (is *ObjectStorage) FinishBlobUpload(repo string, uuid string, body io.Read
fileReader, err := is.store.Reader(context.Background(), src, 0)
if err != nil {
is.log.Error().Err(err).Str("blob", src).Msg("failed to open file")
return errors.ErrUploadNotFound
return zerr.ErrUploadNotFound
}
srcDigest, err := godigest.FromReader(fileReader)
if err != nil {
is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
return errors.ErrBadBlobDigest
return zerr.ErrBadBlobDigest
}
if srcDigest != dstDigest {
is.log.Error().Str("srcDigest", srcDigest.String()).
Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest")
return errors.ErrBadBlobDigest
return zerr.ErrBadBlobDigest
}
fileReader.Close()
@ -849,6 +892,7 @@ func (is *ObjectStorage) FinishBlobUpload(repo string, uuid string, body io.Read
if err := is.store.Move(context.Background(), src, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to finish blob")
return err
}
@ -867,7 +911,8 @@ func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, digest stri
dstDigest, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
return "", -1, errors.ErrBadBlobDigest
return "", -1, zerr.ErrBadBlobDigest
}
u, err := guuid.NewV4()
@ -886,18 +931,21 @@ func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, digest stri
_, err = buf.ReadFrom(body)
if err != nil {
is.log.Error().Err(err).Msg("failed to read blob")
return "", -1, err
}
n, err := writeFile(is.store, src, buf.Bytes())
nbytes, err := writeFile(is.store, src, buf.Bytes())
if err != nil {
is.log.Error().Err(err).Msg("failed to write blob")
return "", -1, err
}
_, err = digester.Write(buf.Bytes())
if err != nil {
is.log.Error().Err(err).Msg("digester failed to write")
return "", -1, err
}
@ -905,7 +953,8 @@ func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, digest stri
if srcDigest != dstDigest {
is.log.Error().Str("srcDigest", srcDigest.String()).
Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest")
return "", -1, errors.ErrBadBlobDigest
return "", -1, zerr.ErrBadBlobDigest
}
is.Lock()
@ -916,10 +965,11 @@ func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, digest stri
if err := is.store.Move(context.Background(), src, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to finish blob")
return "", -1, err
}
return uuid, int64(n), nil
return uuid, int64(nbytes), nil
}
func (is *ObjectStorage) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error {
@ -931,6 +981,7 @@ func (is *ObjectStorage) DeleteBlobUpload(repo string, uuid string) error {
blobUploadPath := is.BlobUploadPath(repo, uuid)
if err := is.store.Delete(context.Background(), blobUploadPath); err != nil {
is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload")
return err
}
@ -944,22 +995,23 @@ func (is *ObjectStorage) BlobPath(repo string, digest godigest.Digest) string {
// CheckBlob verifies a blob and returns true if the blob is correct.
func (is *ObjectStorage) CheckBlob(repo string, digest string) (bool, int64, error) {
d, err := godigest.Parse(digest)
dgst, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
return false, -1, errors.ErrBadBlobDigest
return false, -1, zerr.ErrBadBlobDigest
}
blobPath := is.BlobPath(repo, d)
blobPath := is.BlobPath(repo, dgst)
is.RLock()
defer is.RUnlock()
blobInfo, err := is.store.Stat(context.Background(), blobPath)
binfo, err := is.store.Stat(context.Background(), blobPath)
if err != nil {
_, ok := err.(storageDriver.PathNotFoundError)
if ok {
return false, -1, errors.ErrBlobNotFound
var perr driver.PathNotFoundError
if errors.As(err, &perr) {
return false, -1, zerr.ErrBlobNotFound
}
is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
@ -969,37 +1021,39 @@ func (is *ObjectStorage) CheckBlob(repo string, digest string) (bool, int64, err
is.log.Debug().Str("blob path", blobPath).Msg("blob path found")
return true, blobInfo.Size(), nil
return true, binfo.Size(), nil
}
// GetBlob returns a stream to read the blob.
// FIXME: we should probably parse the manifest and use (digest, mediaType) as a
// blob selector instead of directly downloading the blob.
func (is *ObjectStorage) GetBlob(repo string, digest string, mediaType string) (io.Reader, int64, error) {
d, err := godigest.Parse(digest)
dgst, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
return nil, -1, errors.ErrBadBlobDigest
return nil, -1, zerr.ErrBadBlobDigest
}
blobPath := is.BlobPath(repo, d)
blobPath := is.BlobPath(repo, dgst)
is.RLock()
defer is.RUnlock()
blobInfo, err := is.store.Stat(context.Background(), blobPath)
binfo, err := is.store.Stat(context.Background(), blobPath)
if err != nil {
is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
return nil, -1, errors.ErrBlobNotFound
return nil, -1, zerr.ErrBlobNotFound
}
blobReader, err := is.store.Reader(context.Background(), blobPath, 0)
if err != nil {
is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to open blob")
return nil, -1, err
}
return blobReader, blobInfo.Size(), nil
return blobReader, binfo.Size(), nil
}
func (is *ObjectStorage) GetBlobContent(repo string, digest string) ([]byte, error) {
@ -1013,6 +1067,7 @@ func (is *ObjectStorage) GetBlobContent(repo string, digest string) ([]byte, err
_, err = buf.ReadFrom(blob)
if err != nil {
is.log.Error().Err(err).Msg("failed to read blob")
return []byte{}, err
}
@ -1020,7 +1075,7 @@ func (is *ObjectStorage) GetBlobContent(repo string, digest string) ([]byte, err
}
func (is *ObjectStorage) GetReferrers(repo, digest string, mediaType string) ([]notation.Descriptor, error) {
return nil, errors.ErrMethodNotSupported
return nil, zerr.ErrMethodNotSupported
}
func (is *ObjectStorage) GetIndexContent(repo string) ([]byte, error) {
@ -1029,7 +1084,8 @@ func (is *ObjectStorage) GetIndexContent(repo string) ([]byte, error) {
buf, err := is.store.GetContent(context.Background(), path.Join(dir, "index.json"))
if err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
return []byte{}, errors.ErrRepoNotFound
return []byte{}, zerr.ErrRepoNotFound
}
return buf, nil
@ -1037,13 +1093,14 @@ func (is *ObjectStorage) GetIndexContent(repo string) ([]byte, error) {
// DeleteBlob removes the blob from the repository.
func (is *ObjectStorage) DeleteBlob(repo string, digest string) error {
d, err := godigest.Parse(digest)
dgst, err := godigest.Parse(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
return errors.ErrBlobNotFound
return zerr.ErrBlobNotFound
}
blobPath := is.BlobPath(repo, d)
blobPath := is.BlobPath(repo, dgst)
is.Lock()
defer is.Unlock()
@ -1051,11 +1108,13 @@ func (is *ObjectStorage) DeleteBlob(repo string, digest string) error {
_, err = is.store.Stat(context.Background(), blobPath)
if err != nil {
is.log.Error().Err(err).Str("blob", blobPath).Msg("failed to stat blob")
return errors.ErrBlobNotFound
return zerr.ErrBlobNotFound
}
if err := is.store.Delete(context.Background(), blobPath); err != nil {
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path")
return err
}
@ -1064,17 +1123,17 @@ func (is *ObjectStorage) DeleteBlob(repo string, digest string) error {
// Do not use for multipart upload, buf must not be empty.
// If you want to create an empty file use is.store.PutContent().
func writeFile(store storageDriver.StorageDriver, filepath string, buf []byte) (int, error) {
func writeFile(store driver.StorageDriver, filepath string, buf []byte) (int, error) {
var n int
if fw, err := store.Writer(context.Background(), filepath, false); err == nil {
defer fw.Close()
if stwr, err := store.Writer(context.Background(), filepath, false); err == nil {
defer stwr.Close()
if n, err = fw.Write(buf); err != nil {
if n, err = stwr.Write(buf); err != nil {
return -1, err
}
if err := fw.Commit(); err != nil {
if err := stwr.Commit(); err != nil {
return -1, err
}
} else {
@ -1087,19 +1146,19 @@ func writeFile(store storageDriver.StorageDriver, filepath string, buf []byte) (
// Because we can not create an empty multipart upload, we store multi part uploads
// so that we know when to create a fileWriter with append=true or with append=false
// Trying and handling errors results in weird s3 api errors.
func getMultipartFileWriter(is *ObjectStorage, filepath string) (storageDriver.FileWriter, error) {
var file storageDriver.FileWriter
func getMultipartFileWriter(imgStore *ObjectStorage, filepath string) (driver.FileWriter, error) {
var file driver.FileWriter
var err error
isMultiPartStarted, ok := is.isMultiPartUpload[filepath]
isMultiPartStarted, ok := imgStore.isMultiPartUpload[filepath]
if !isMultiPartStarted || !ok {
file, err = is.store.Writer(context.Background(), filepath, false)
file, err = imgStore.store.Writer(context.Background(), filepath, false)
if err != nil {
return file, err
}
} else {
file, err = is.store.Writer(context.Background(), filepath, true)
file, err = imgStore.store.Writer(context.Background(), filepath, true)
if err != nil {
return file, err
}

View File

@ -31,14 +31,14 @@ const (
)
type ScrubImageResult struct {
ImageName string `json:"image_name"`
ImageName string `json:"imageName"`
Tag string `json:"tag"`
Status string `json:"status"`
Error string `json:"error"`
}
type ScrubResults struct {
ScrubResults []ScrubImageResult `json:"scrub_results"`
ScrubResults []ScrubImageResult `json:"scrubResults"`
}
func (sc StoreController) CheckAllBlobsIntegrity() (ScrubResults, error) {
@ -51,16 +51,14 @@ func (sc StoreController) CheckAllBlobsIntegrity() (ScrubResults, error) {
imageStoreList[""] = sc.DefaultStore
for _, is := range imageStoreList {
images, err := is.GetRepositories()
for _, imgStore := range imageStoreList {
images, err := imgStore.GetRepositories()
if err != nil {
return results, err
}
for _, repo := range images {
imageResults, err := checkImage(repo, is)
imageResults, err := checkImage(repo, imgStore)
if err != nil {
return results, err
}
@ -72,11 +70,11 @@ func (sc StoreController) CheckAllBlobsIntegrity() (ScrubResults, error) {
return results, nil
}
func checkImage(imageName string, is ImageStore) ([]ScrubImageResult, error) {
func checkImage(imageName string, imgStore ImageStore) ([]ScrubImageResult, error) {
results := []ScrubImageResult{}
dir := path.Join(is.RootDir(), imageName)
if !is.DirExists(dir) {
dir := path.Join(imgStore.RootDir(), imageName)
if !imgStore.DirExists(dir) {
return results, errors.ErrRepoNotFound
}
@ -89,11 +87,10 @@ func checkImage(imageName string, is ImageStore) ([]ScrubImageResult, error) {
defer oci.Close()
is.RLock()
defer is.RUnlock()
imgStore.RLock()
defer imgStore.RUnlock()
buf, err := ioutil.ReadFile(path.Join(dir, "index.json"))
if err != nil {
return results, err
}
@ -137,25 +134,29 @@ func checkIntegrity(ctx context.Context, imageName, tagName string, oci casext.E
_, err = os.Stat(layerPath)
if err != nil {
imageRes = getResult(imageName, tagName, errors.ErrBlobNotFound)
break
}
f, err := os.Open(layerPath)
layerFh, err := os.Open(layerPath)
if err != nil {
imageRes = getResult(imageName, tagName, errors.ErrBlobNotFound)
break
}
computedDigest, err := godigest.FromReader(f)
f.Close()
computedDigest, err := godigest.FromReader(layerFh)
layerFh.Close()
if err != nil {
imageRes = getResult(imageName, tagName, errors.ErrBadBlobDigest)
break
}
if computedDigest != layer.Digest {
imageRes = getResult(imageName, tagName, errors.ErrBadBlobDigest)
break
}
@ -209,10 +210,12 @@ func getScrubTableWriter(writer io.Writer) *tablewriter.Table {
return table
}
const tableCols = 4
func printScrubTableHeader(writer io.Writer) {
table := getScrubTableWriter(writer)
row := make([]string, 4)
row := make([]string, tableCols)
row[colImageNameIndex] = "IMAGE NAME"
row[colTagIndex] = "TAG"
@ -232,7 +235,7 @@ func printImageResult(imageResult ScrubImageResult) string {
table.SetColMinWidth(colStatusIndex, statusWidth)
table.SetColMinWidth(colErrorIndex, errorWidth)
row := make([]string, 4)
row := make([]string, tableCols)
row[colImageNameIndex] = imageResult.ImageName
row[colTagIndex] = imageResult.Tag

View File

@ -36,20 +36,20 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
Convey("Scrub only one repo", t, func(c C) {
// initialize repo
err = il.InitRepo(repoName)
err = imgStore.InitRepo(repoName)
So(err, ShouldBeNil)
ok := il.DirExists(path.Join(il.RootDir(), repoName))
ok := imgStore.DirExists(path.Join(imgStore.RootDir(), repoName))
So(ok, ShouldBeTrue)
storeController := storage.StoreController{}
storeController.DefaultStore = il
So(storeController.GetImageStore(repoName), ShouldResemble, il)
storeController.DefaultStore = imgStore
So(storeController.GetImageStore(repoName), ShouldResemble, imgStore)
sc := storage.StoreController{}
sc.DefaultStore = il
storeCtlr := storage.StoreController{}
storeCtlr.DefaultStore = imgStore
const tag = "1.0"
@ -60,15 +60,15 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
// create layer digest
body := []byte("this is a blob")
buf := bytes.NewBuffer(body)
l := buf.Len()
d := godigest.FromBytes(body)
u, n, err := il.FullBlobUpload(repoName, buf, d.String())
buflen := buf.Len()
digest := godigest.FromBytes(body)
upload, n, err := imgStore.FullBlobUpload(repoName, buf, digest.String())
So(err, ShouldBeNil)
So(n, ShouldEqual, len(body))
So(u, ShouldNotBeEmpty)
layer = d.String()
So(upload, ShouldNotBeEmpty)
layer = digest.String()
//create config digest
// create config digest
created := time.Now().Format("2006-01-02T15:04:05Z")
configBody := []byte(fmt.Sprintf(`{
"created": "%v",
@ -96,7 +96,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
configBuf := bytes.NewBuffer(configBody)
configLen := configBuf.Len()
configDigest := godigest.FromBytes(configBody)
uConfig, nConfig, err := il.FullBlobUpload(repoName, configBuf, configDigest.String())
uConfig, nConfig, err := imgStore.FullBlobUpload(repoName, configBuf, configDigest.String())
So(err, ShouldBeNil)
So(nConfig, ShouldEqual, len(configBody))
So(uConfig, ShouldNotBeEmpty)
@ -105,7 +105,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
// create manifest and add it to the repository
annotationsMap := make(map[string]string)
annotationsMap[ispec.AnnotationRefName] = tag
m := ispec.Manifest{
mnfst := ispec.Manifest{
Config: ispec.Descriptor{
MediaType: "application/vnd.oci.image.config.v1+json",
Digest: configDigest,
@ -114,23 +114,23 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
},
Annotations: annotationsMap,
}
m.SchemaVersion = 2
mb, _ := json.Marshal(m)
mnfst.SchemaVersion = 2
mb, _ := json.Marshal(mnfst)
manifest, err = il.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, mb)
manifest, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, mb)
So(err, ShouldBeNil)
Convey("Blobs integrity not affected", func() {
buff := bytes.NewBufferString("")
res, err := sc.CheckAllBlobsIntegrity()
res, err := storeCtlr.CheckAllBlobsIntegrity()
res.PrintScrubResults(buff)
So(err, ShouldBeNil)
@ -143,18 +143,18 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
Convey("Manifest integrity affected", func() {
// get content of manifest file
content, _, _, err := il.GetImageManifest(repoName, manifest)
content, _, _, err := imgStore.GetImageManifest(repoName, manifest)
So(err, ShouldBeNil)
// delete content of manifest file
manifest = strings.ReplaceAll(manifest, "sha256:", "")
manifestFile := path.Join(il.RootDir(), repoName, "/blobs/sha256", manifest)
manifestFile := path.Join(imgStore.RootDir(), repoName, "/blobs/sha256", manifest)
err = os.Truncate(manifestFile, 0)
So(err, ShouldBeNil)
buff := bytes.NewBufferString("")
res, err := sc.CheckAllBlobsIntegrity()
res, err := storeCtlr.CheckAllBlobsIntegrity()
res.PrintScrubResults(buff)
So(err, ShouldBeNil)
@ -166,24 +166,24 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
So(actual, ShouldContainSubstring, "test 1.0 affected parse application/vnd.oci.image.manifest.v1+json")
// put manifest content back to file
err = ioutil.WriteFile(manifestFile, content, 0600)
err = ioutil.WriteFile(manifestFile, content, 0o600)
So(err, ShouldBeNil)
})
Convey("Config integrity affected", func() {
// get content of config file
content, err := il.GetBlobContent(repoName, config)
content, err := imgStore.GetBlobContent(repoName, config)
So(err, ShouldBeNil)
// delete content of config file
config = strings.ReplaceAll(config, "sha256:", "")
configFile := path.Join(il.RootDir(), repoName, "/blobs/sha256", config)
configFile := path.Join(imgStore.RootDir(), repoName, "/blobs/sha256", config)
err = os.Truncate(configFile, 0)
So(err, ShouldBeNil)
buff := bytes.NewBufferString("")
res, err := sc.CheckAllBlobsIntegrity()
res, err := storeCtlr.CheckAllBlobsIntegrity()
res.PrintScrubResults(buff)
So(err, ShouldBeNil)
@ -194,24 +194,24 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
So(actual, ShouldContainSubstring, "test 1.0 affected stat: parse application/vnd.oci.image.config.v1+json")
// put config content back to file
err = ioutil.WriteFile(configFile, content, 0600)
err = ioutil.WriteFile(configFile, content, 0o600)
So(err, ShouldBeNil)
})
Convey("Layers integrity affected", func() {
// get content of layer
content, err := il.GetBlobContent(repoName, layer)
content, err := imgStore.GetBlobContent(repoName, layer)
So(err, ShouldBeNil)
// delete content of layer file
layer = strings.ReplaceAll(layer, "sha256:", "")
layerFile := path.Join(il.RootDir(), repoName, "/blobs/sha256", layer)
layerFile := path.Join(imgStore.RootDir(), repoName, "/blobs/sha256", layer)
err = os.Truncate(layerFile, 0)
So(err, ShouldBeNil)
buff := bytes.NewBufferString("")
res, err := sc.CheckAllBlobsIntegrity()
res, err := storeCtlr.CheckAllBlobsIntegrity()
res.PrintScrubResults(buff)
So(err, ShouldBeNil)
@ -222,20 +222,20 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
So(actual, ShouldContainSubstring, "test 1.0 affected blob: bad blob digest")
// put layer content back to file
err = ioutil.WriteFile(layerFile, content, 0600)
err = ioutil.WriteFile(layerFile, content, 0o600)
So(err, ShouldBeNil)
})
Convey("Layer not found", func() {
// delete layer file
layer = strings.ReplaceAll(layer, "sha256:", "")
layerFile := path.Join(il.RootDir(), repoName, "/blobs/sha256", layer)
layerFile := path.Join(imgStore.RootDir(), repoName, "/blobs/sha256", layer)
err = os.Remove(layerFile)
So(err, ShouldBeNil)
buff := bytes.NewBufferString("")
res, err := sc.CheckAllBlobsIntegrity()
res, err := storeCtlr.CheckAllBlobsIntegrity()
res.PrintScrubResults(buff)
So(err, ShouldBeNil)

File diff suppressed because it is too large Load Diff

View File

@ -2,16 +2,16 @@ package storage_test
import (
"bytes"
"crypto/rand"
_ "crypto/sha256"
"encoding/json"
"io/ioutil"
"math/rand"
"math/big"
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -33,73 +33,73 @@ func TestStorageFSAPIs(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
Convey("Repo layout", t, func(c C) {
repoName := "test"
Convey("Bad image manifest", func() {
v, err := il.NewBlobUpload("test")
upload, err := imgStore.NewBlobUpload("test")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
content := []byte("test-data1")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
buflen := buf.Len()
digest := godigest.FromBytes(content)
b, err := il.PutBlobChunk(repoName, v, 0, int64(l), buf)
blob, err := imgStore.PutBlobChunk(repoName, upload, 0, int64(buflen), buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
err = il.FinishBlobUpload("test", v, buf, d.String())
err = imgStore.FinishBlobUpload("test", upload, buf, digest.String())
So(err, ShouldBeNil)
annotationsMap := make(map[string]string)
annotationsMap[ispec.AnnotationRefName] = "1.0"
m := ispec.Manifest{
manifest := ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
},
Annotations: annotationsMap,
}
m.SchemaVersion = 2
mb, _ := json.Marshal(m)
d = godigest.FromBytes(mb)
manifest.SchemaVersion = 2
manifestBuf, _ := json.Marshal(manifest)
digest = godigest.FromBytes(manifestBuf)
err = os.Chmod(path.Join(il.RootDir(), repoName, "index.json"), 0000)
err = os.Chmod(path.Join(imgStore.RootDir(), repoName, "index.json"), 0o000)
if err != nil {
panic(err)
}
_, err = il.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldNotBeNil)
err = os.Chmod(path.Join(il.RootDir(), repoName, "index.json"), 0755)
err = os.Chmod(path.Join(imgStore.RootDir(), repoName, "index.json"), 0o755)
if err != nil {
panic(err)
}
_, err = il.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
manifestPath := path.Join(il.RootDir(), repoName, "blobs", d.Algorithm().String(), d.Encoded())
manifestPath := path.Join(imgStore.RootDir(), repoName, "blobs", digest.Algorithm().String(), digest.Encoded())
err = os.Chmod(manifestPath, 0000)
err = os.Chmod(manifestPath, 0o000)
if err != nil {
panic(err)
}
_, _, _, err = il.GetImageManifest(repoName, d.String())
_, _, _, err = imgStore.GetImageManifest(repoName, digest.String())
So(err, ShouldNotBeNil)
err = os.Remove(manifestPath)
@ -107,42 +107,42 @@ func TestStorageFSAPIs(t *testing.T) {
panic(err)
}
_, _, _, err = il.GetImageManifest(repoName, d.String())
_, _, _, err = imgStore.GetImageManifest(repoName, digest.String())
So(err, ShouldNotBeNil)
err = os.Chmod(path.Join(il.RootDir(), repoName), 0000)
err = os.Chmod(path.Join(imgStore.RootDir(), repoName), 0o000)
if err != nil {
panic(err)
}
_, err = il.PutImageManifest(repoName, "2.0", ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest(repoName, "2.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldNotBeNil)
err = os.Chmod(path.Join(il.RootDir(), repoName), 0755)
err = os.Chmod(path.Join(imgStore.RootDir(), repoName), 0o755)
if err != nil {
panic(err)
}
// invalid GetReferrers
_, err = il.GetReferrers("invalid", "invalid", "invalid")
_, err = imgStore.GetReferrers("invalid", "invalid", "invalid")
So(err, ShouldNotBeNil)
_, err = il.GetReferrers(repoName, "invalid", "invalid")
_, err = imgStore.GetReferrers(repoName, "invalid", "invalid")
So(err, ShouldNotBeNil)
_, err = il.GetReferrers(repoName, d.String(), "invalid")
_, err = imgStore.GetReferrers(repoName, digest.String(), "invalid")
So(err, ShouldNotBeNil)
// invalid DeleteImageManifest
indexPath := path.Join(il.RootDir(), repoName, "index.json")
err = os.Chmod(indexPath, 0000)
indexPath := path.Join(imgStore.RootDir(), repoName, "index.json")
err = os.Chmod(indexPath, 0o000)
if err != nil {
panic(err)
}
err = il.DeleteImageManifest(repoName, d.String())
err = imgStore.DeleteImageManifest(repoName, digest.String())
So(err, ShouldNotBeNil)
err = os.RemoveAll(path.Join(il.RootDir(), repoName))
err = os.RemoveAll(path.Join(imgStore.RootDir(), repoName))
if err != nil {
panic(err)
}
@ -160,108 +160,105 @@ func TestDedupeLinks(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
Convey("Dedupe", t, func(c C) {
blobDigest1 := ""
blobDigest2 := ""
// manifest1
v, err := il.NewBlobUpload("dedupe1")
upload, err := imgStore.NewBlobUpload("dedupe1")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
content := []byte("test-data3")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
b, err := il.PutBlobChunkStreamed("dedupe1", v, buf)
buflen := buf.Len()
digest := godigest.FromBytes(content)
blob, err := imgStore.PutBlobChunkStreamed("dedupe1", upload, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
blobDigest1 = strings.Split(d.String(), ":")[1]
So(blob, ShouldEqual, buflen)
blobDigest1 := strings.Split(digest.String(), ":")[1]
So(blobDigest1, ShouldNotBeEmpty)
err = il.FinishBlobUpload("dedupe1", v, buf, d.String())
err = imgStore.FinishBlobUpload("dedupe1", upload, buf, digest.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
_, _, err = il.CheckBlob("dedupe1", d.String())
_, _, err = imgStore.CheckBlob("dedupe1", digest.String())
So(err, ShouldBeNil)
_, _, err = il.GetBlob("dedupe1", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
_, _, err = imgStore.GetBlob("dedupe1", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
m := ispec.Manifest{}
m.SchemaVersion = 2
m = ispec.Manifest{
manifest := ispec.Manifest{}
manifest.SchemaVersion = 2
manifest = ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
},
}
m.SchemaVersion = 2
mb, _ := json.Marshal(m)
d = godigest.FromBytes(mb)
_, err = il.PutImageManifest("dedupe1", d.String(), ispec.MediaTypeImageManifest, mb)
manifest.SchemaVersion = 2
manifestBuf, _ := json.Marshal(manifest)
digest = godigest.FromBytes(manifestBuf)
_, err = imgStore.PutImageManifest("dedupe1", digest.String(), ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("dedupe1", d.String())
_, _, _, err = imgStore.GetImageManifest("dedupe1", digest.String())
So(err, ShouldBeNil)
// manifest2
v, err = il.NewBlobUpload("dedupe2")
upload, err = imgStore.NewBlobUpload("dedupe2")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
content = []byte("test-data3")
buf = bytes.NewBuffer(content)
l = buf.Len()
d = godigest.FromBytes(content)
b, err = il.PutBlobChunkStreamed("dedupe2", v, buf)
buflen = buf.Len()
digest = godigest.FromBytes(content)
blob, err = imgStore.PutBlobChunkStreamed("dedupe2", upload, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
blobDigest2 = strings.Split(d.String(), ":")[1]
So(blob, ShouldEqual, buflen)
blobDigest2 := strings.Split(digest.String(), ":")[1]
So(blobDigest2, ShouldNotBeEmpty)
err = il.FinishBlobUpload("dedupe2", v, buf, d.String())
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
_, _, err = il.CheckBlob("dedupe2", d.String())
_, _, err = imgStore.CheckBlob("dedupe2", digest.String())
So(err, ShouldBeNil)
_, _, err = il.GetBlob("dedupe2", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
_, _, err = imgStore.GetBlob("dedupe2", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
m = ispec.Manifest{}
m.SchemaVersion = 2
m = ispec.Manifest{
manifest = ispec.Manifest{}
manifest.SchemaVersion = 2
manifest = ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
},
}
m.SchemaVersion = 2
mb, _ = json.Marshal(m)
d = godigest.FromBytes(mb)
_, err = il.PutImageManifest("dedupe2", "1.0", ispec.MediaTypeImageManifest, mb)
manifest.SchemaVersion = 2
manifestBuf, _ = json.Marshal(manifest)
digest = godigest.FromBytes(manifestBuf)
_, err = imgStore.PutImageManifest("dedupe2", "1.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("dedupe2", d.String())
_, _, _, err = imgStore.GetImageManifest("dedupe2", digest.String())
So(err, ShouldBeNil)
// verify that dedupe with hard links happened
@ -323,33 +320,33 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
err = os.Chmod(dir, 0000) // remove all perms
err = os.Chmod(dir, 0o000) // remove all perms
if err != nil {
panic(err)
}
if os.Geteuid() != 0 {
err = il.InitRepo("test")
err = imgStore.InitRepo("test")
So(err, ShouldNotBeNil)
}
err = os.Chmod(dir, 0755)
err = os.Chmod(dir, 0o755)
if err != nil {
panic(err)
}
// Init repo should fail if repo is a file.
err = ioutil.WriteFile(path.Join(dir, "file-test"), []byte("this is test file"), 0755) // nolint:gosec
err = ioutil.WriteFile(path.Join(dir, "file-test"), []byte("this is test file"), 0o755) // nolint:gosec
So(err, ShouldBeNil)
err = il.InitRepo("file-test")
err = imgStore.InitRepo("file-test")
So(err, ShouldNotBeNil)
err = os.Mkdir(path.Join(dir, "test-dir"), 0755)
err = os.Mkdir(path.Join(dir, "test-dir"), 0o755)
So(err, ShouldBeNil)
err = il.InitRepo("test-dir")
err = imgStore.InitRepo("test-dir")
So(err, ShouldBeNil)
})
@ -362,43 +359,43 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
err = os.MkdirAll(path.Join(dir, "invalid-test"), 0755)
err = os.MkdirAll(path.Join(dir, "invalid-test"), 0o755)
So(err, ShouldBeNil)
err = os.Chmod(path.Join(dir, "invalid-test"), 0000) // remove all perms
err = os.Chmod(path.Join(dir, "invalid-test"), 0o000) // remove all perms
if err != nil {
panic(err)
}
_, err = il.ValidateRepo("invalid-test")
_, err = imgStore.ValidateRepo("invalid-test")
So(err, ShouldNotBeNil)
So(err, ShouldEqual, errors.ErrRepoNotFound)
err = os.Chmod(path.Join(dir, "invalid-test"), 0755) // remove all perms
err = os.Chmod(path.Join(dir, "invalid-test"), 0o755) // remove all perms
if err != nil {
panic(err)
}
err = ioutil.WriteFile(path.Join(dir, "invalid-test", "blobs"), []byte{}, 0755) // nolint: gosec
err = ioutil.WriteFile(path.Join(dir, "invalid-test", "blobs"), []byte{}, 0o755) // nolint: gosec
if err != nil {
panic(err)
}
err = ioutil.WriteFile(path.Join(dir, "invalid-test", "index.json"), []byte{}, 0755) // nolint: gosec
err = ioutil.WriteFile(path.Join(dir, "invalid-test", "index.json"), []byte{}, 0o755) // nolint: gosec
if err != nil {
panic(err)
}
err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte{}, 0755) // nolint: gosec
err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte{}, 0o755) // nolint: gosec
if err != nil {
panic(err)
}
isValid, err := il.ValidateRepo("invalid-test")
isValid, err := imgStore.ValidateRepo("invalid-test")
So(err, ShouldBeNil)
So(isValid, ShouldEqual, false)
@ -406,20 +403,20 @@ func TestNegativeCases(t *testing.T) {
if err != nil {
panic(err)
}
err = os.Mkdir(path.Join(dir, "invalid-test", "blobs"), 0755)
err = os.Mkdir(path.Join(dir, "invalid-test", "blobs"), 0o755)
if err != nil {
panic(err)
}
isValid, err = il.ValidateRepo("invalid-test")
isValid, err = imgStore.ValidateRepo("invalid-test")
So(err, ShouldNotBeNil)
So(isValid, ShouldEqual, false)
err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte("{}"), 0755) // nolint: gosec
err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte("{}"), 0o755) // nolint: gosec
if err != nil {
panic(err)
}
isValid, err = il.ValidateRepo("invalid-test")
isValid, err = imgStore.ValidateRepo("invalid-test")
So(err, ShouldNotBeNil)
So(err, ShouldEqual, errors.ErrRepoBadVersion)
So(isValid, ShouldEqual, false)
@ -433,7 +430,7 @@ func TestNegativeCases(t *testing.T) {
os.Remove(path.Join(dir, "test", f.Name()))
}
_, err = il.ValidateRepo("test")
_, err = imgStore.ValidateRepo("test")
So(err, ShouldNotBeNil)
err = os.RemoveAll(path.Join(dir, "test"))
@ -441,19 +438,19 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
_, err = il.ValidateRepo("test")
_, err = imgStore.ValidateRepo("test")
So(err, ShouldNotBeNil)
err = os.Chmod(dir, 0000) // remove all perms
err = os.Chmod(dir, 0o000) // remove all perms
if err != nil {
panic(err)
}
if os.Geteuid() != 0 {
So(func() { _, _ = il.ValidateRepo("test") }, ShouldPanic)
So(func() { _, _ = imgStore.ValidateRepo("test") }, ShouldPanic)
}
err = os.Chmod(dir, 0755) // remove all perms
err = os.Chmod(dir, 0o755) // remove all perms
if err != nil {
panic(err)
}
@ -463,7 +460,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
_, err = il.GetRepositories()
_, err = imgStore.GetRepositories()
So(err, ShouldNotBeNil)
})
@ -480,17 +477,17 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)
_, err = il.GetImageTags("test")
_, err = imgStore.GetImageTags("test")
So(err, ShouldNotBeNil)
So(os.RemoveAll(path.Join(dir, "test")), ShouldBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0600), ShouldBeNil)
_, err = il.GetImageTags("test")
So(imgStore.InitRepo("test"), ShouldBeNil)
So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0o600), ShouldBeNil)
_, err = imgStore.GetImageTags("test")
So(err, ShouldNotBeNil)
})
@ -507,17 +504,17 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
err = os.Chmod(path.Join(dir, "test", "index.json"), 0000)
err = os.Chmod(path.Join(dir, "test", "index.json"), 0o000)
if err != nil {
panic(err)
}
_, _, _, err = il.GetImageManifest("test", "")
_, _, _, err = imgStore.GetImageManifest("test", "")
So(err, ShouldNotBeNil)
err = os.Remove(path.Join(dir, "test", "index.json"))
@ -525,7 +522,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
_, _, _, err = il.GetImageManifest("test", "")
_, _, _, err = imgStore.GetImageManifest("test", "")
So(err, ShouldNotBeNil)
err = os.RemoveAll(path.Join(dir, "test"))
@ -533,13 +530,13 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
So(il.InitRepo("test"), ShouldBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
err = ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0600)
err = ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0o600)
if err != nil {
panic(err)
}
_, _, _, err = il.GetImageManifest("test", "")
_, _, _, err = imgStore.GetImageManifest("test", "")
So(err, ShouldNotBeNil)
})
@ -552,45 +549,45 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
err = os.Chmod(path.Join(dir, "test", ".uploads"), 0000)
err = os.Chmod(path.Join(dir, "test", ".uploads"), 0o000)
if err != nil {
panic(err)
}
_, err = il.NewBlobUpload("test")
_, err = imgStore.NewBlobUpload("test")
So(err, ShouldNotBeNil)
err = os.Chmod(path.Join(dir, "test"), 0000)
err = os.Chmod(path.Join(dir, "test"), 0o000)
if err != nil {
panic(err)
}
_, err = il.NewBlobUpload("test")
_, err = imgStore.NewBlobUpload("test")
So(err, ShouldNotBeNil)
err = os.Chmod(path.Join(dir, "test"), 0755)
err = os.Chmod(path.Join(dir, "test"), 0o755)
if err != nil {
panic(err)
}
So(il.InitRepo("test"), ShouldBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
_, err = il.NewBlobUpload("test")
_, err = imgStore.NewBlobUpload("test")
So(err, ShouldNotBeNil)
err = os.Chmod(path.Join(dir, "test", ".uploads"), 0755)
err = os.Chmod(path.Join(dir, "test", ".uploads"), 0o755)
if err != nil {
panic(err)
}
v, err := il.NewBlobUpload("test")
upload, err := imgStore.NewBlobUpload("test")
So(err, ShouldBeNil)
err = os.Chmod(path.Join(dir, "test", ".uploads"), 0000)
err = os.Chmod(path.Join(dir, "test", ".uploads"), 0o000)
if err != nil {
panic(err)
}
@ -598,10 +595,10 @@ func TestNegativeCases(t *testing.T) {
content := []byte("test-data3")
buf := bytes.NewBuffer(content)
l := buf.Len()
_, err = il.PutBlobChunkStreamed("test", v, buf)
_, err = imgStore.PutBlobChunkStreamed("test", upload, buf)
So(err, ShouldNotBeNil)
_, err = il.PutBlobChunk("test", v, 0, int64(l), buf)
_, err = imgStore.PutBlobChunk("test", upload, 0, int64(l), buf)
So(err, ShouldNotBeNil)
})
@ -614,52 +611,52 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
imgStore := storage.NewImageStore(dir, true, true, log, metrics)
v, err := il.NewBlobUpload("dedupe1")
upload, err := imgStore.NewBlobUpload("dedupe1")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
content := []byte("test-data3")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
b, err := il.PutBlobChunkStreamed("dedupe1", v, buf)
buflen := buf.Len()
digest := godigest.FromBytes(content)
blob, err := imgStore.PutBlobChunkStreamed("dedupe1", upload, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
blobDigest1 := strings.Split(d.String(), ":")[1]
blobDigest1 := strings.Split(digest.String(), ":")[1]
So(blobDigest1, ShouldNotBeEmpty)
err = il.FinishBlobUpload("dedupe1", v, buf, d.String())
err = imgStore.FinishBlobUpload("dedupe1", upload, buf, digest.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
// Create a file at the same place where FinishBlobUpload will create
err = il.InitRepo("dedupe2")
err = imgStore.InitRepo("dedupe2")
So(err, ShouldBeNil)
err = os.MkdirAll(path.Join(dir, "dedupe2", "blobs/sha256"), 0755)
err = os.MkdirAll(path.Join(dir, "dedupe2", "blobs/sha256"), 0o755)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1), content, 0755) // nolint: gosec
err = ioutil.WriteFile(path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1), content, 0o755) // nolint: gosec
if err != nil {
panic(err)
}
v, err = il.NewBlobUpload("dedupe2")
upload, err = imgStore.NewBlobUpload("dedupe2")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
content = []byte("test-data3")
buf = bytes.NewBuffer(content)
l = buf.Len()
d = godigest.FromBytes(content)
b, err = il.PutBlobChunkStreamed("dedupe2", v, buf)
buflen = buf.Len()
digest = godigest.FromBytes(content)
blob, err = imgStore.PutBlobChunkStreamed("dedupe2", upload, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
cmd := exec.Command("sudo", "chattr", "+i", path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1)) // nolint: gosec
_, err = cmd.Output()
@ -667,9 +664,9 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
err = il.FinishBlobUpload("dedupe2", v, buf, d.String())
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest.String())
So(err, ShouldNotBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
cmd = exec.Command("sudo", "chattr", "-i", path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1)) // nolint: gosec
_, err = cmd.Output()
@ -677,9 +674,9 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
err = il.FinishBlobUpload("dedupe2", v, buf, d.String())
err = imgStore.FinishBlobUpload("dedupe2", upload, buf, digest.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
})
Convey("DirExists call with a filename as argument", t, func(c C) {
@ -690,7 +687,7 @@ func TestNegativeCases(t *testing.T) {
defer os.RemoveAll(dir)
filePath := path.Join(dir, "file.txt")
err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0644) //nolint: gosec
err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0o644) //nolint: gosec
if err != nil {
panic(err)
}
@ -704,10 +701,12 @@ func TestHardLink(t *testing.T) {
Convey("Test that ValidateHardLink creates rootDir if it does not exist", t, func() {
var randomDir string
rand.Seed(time.Now().UnixNano())
for {
randomLen := rand.Intn(100)
randomDir = "/tmp/" + randSeq(randomLen)
nBig, err := rand.Int(rand.Reader, big.NewInt(100))
if err != nil {
panic(err)
}
randomDir = "/tmp/" + randSeq(int(nBig.Int64()))
if _, err := os.Stat(randomDir); os.IsNotExist(err) {
break
@ -726,7 +725,7 @@ func TestHardLink(t *testing.T) {
defer os.RemoveAll(dir)
filePath := path.Join(dir, "file.txt")
err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0644) //nolint: gosec
err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0o644) //nolint: gosec
if err != nil {
panic(err)
}
@ -744,12 +743,12 @@ func TestHardLink(t *testing.T) {
err = storage.ValidateHardLink(dir)
So(err, ShouldBeNil)
err = ioutil.WriteFile(path.Join(dir, "hardtest.txt"), []byte("testing hard link code"), 0644) //nolint: gosec
err = ioutil.WriteFile(path.Join(dir, "hardtest.txt"), []byte("testing hard link code"), 0o644) //nolint: gosec
if err != nil {
panic(err)
}
err = os.Chmod(dir, 0400)
err = os.Chmod(dir, 0o400)
if err != nil {
panic(err)
}
@ -757,7 +756,7 @@ func TestHardLink(t *testing.T) {
err = os.Link(path.Join(dir, "hardtest.txt"), path.Join(dir, "duphardtest.txt"))
So(err, ShouldNotBeNil)
err = os.Chmod(dir, 0644)
err = os.Chmod(dir, 0o644)
if err != nil {
panic(err)
}
@ -765,12 +764,17 @@ func TestHardLink(t *testing.T) {
}
func randSeq(n int) string {
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
letters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
buf := make([]rune, n)
for index := range buf {
nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
if err != nil {
panic(err)
}
return string(b)
buf[index] = letters[int(nBig.Int64())]
}
return string(buf)
}

View File

@ -11,10 +11,12 @@ import (
"path"
"strings"
"sync"
//"strings"
"testing"
// Add s3 support.
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
guuid "github.com/gofrs/uuid"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -25,25 +27,21 @@ import (
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/s3"
// Add s3 support
"github.com/docker/distribution/registry/storage/driver"
storageDriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
)
func cleanupStorage(store storageDriver.StorageDriver, name string) {
func cleanupStorage(store driver.StorageDriver, name string) {
_ = store.Delete(context.Background(), name)
}
func skipIt(t *testing.T) {
t.Helper()
if os.Getenv("S3MOCK_ENDPOINT") == "" {
t.Skip("Skipping testing without AWS S3 mock server")
}
}
func createObjectsStore(rootDir string) (storageDriver.StorageDriver, storage.ImageStore, error) {
func createObjectsStore(rootDir string) (driver.StorageDriver, storage.ImageStore, error) {
bucket := "zot-storage-test"
endpoint := os.Getenv("S3MOCK_ENDPOINT")
storageDriverParams := map[string]interface{}{
@ -96,7 +94,7 @@ func TestStorageAPIs(t *testing.T) {
for _, testcase := range testCases {
testcase := testcase
t.Run(testcase.testCaseName, func(t *testing.T) {
var il storage.ImageStore
var imgStore storage.ImageStore
if testcase.storageType == "s3" {
skipIt(t)
@ -108,7 +106,7 @@ func TestStorageAPIs(t *testing.T) {
testDir := path.Join("/oci-repo-test", uuid.String())
var store driver.StorageDriver
store, il, _ = createObjectsStore(testDir)
store, imgStore, _ = createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
} else {
dir, err := ioutil.TempDir("", "oci-repo-test")
@ -120,44 +118,44 @@ func TestStorageAPIs(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il = storage.NewImageStore(dir, true, true, log, metrics)
imgStore = storage.NewImageStore(dir, true, true, log, metrics)
}
Convey("Repo layout", t, func(c C) {
repoName := "test"
Convey("Validate repo without initialization", func() {
v, err := il.ValidateRepo(repoName)
v, err := imgStore.ValidateRepo(repoName)
So(v, ShouldEqual, false)
So(err, ShouldNotBeNil)
ok := il.DirExists(path.Join(il.RootDir(), repoName))
ok := imgStore.DirExists(path.Join(imgStore.RootDir(), repoName))
So(ok, ShouldBeFalse)
})
Convey("Initialize repo", func() {
err := il.InitRepo(repoName)
err := imgStore.InitRepo(repoName)
So(err, ShouldBeNil)
ok := il.DirExists(path.Join(il.RootDir(), repoName))
ok := imgStore.DirExists(path.Join(imgStore.RootDir(), repoName))
So(ok, ShouldBeTrue)
storeController := storage.StoreController{}
storeController.DefaultStore = il
So(storeController.GetImageStore("test"), ShouldResemble, il)
storeController.DefaultStore = imgStore
So(storeController.GetImageStore("test"), ShouldResemble, imgStore)
})
Convey("Validate repo", func() {
v, err := il.ValidateRepo(repoName)
v, err := imgStore.ValidateRepo(repoName)
So(err, ShouldBeNil)
So(v, ShouldEqual, true)
})
Convey("Get repos", func() {
v, err := il.GetRepositories()
v, err := imgStore.GetRepositories()
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
})
Convey("Get image tags", func() {
v, err := il.GetImageTags("test")
v, err := imgStore.GetImageTags("test")
So(err, ShouldBeNil)
So(v, ShouldBeEmpty)
})
@ -165,37 +163,37 @@ func TestStorageAPIs(t *testing.T) {
Convey("Full blob upload", func() {
body := []byte("this is a blob")
buf := bytes.NewBuffer(body)
d := godigest.FromBytes(body)
u, n, err := il.FullBlobUpload("test", buf, d.String())
digest := godigest.FromBytes(body)
upload, n, err := imgStore.FullBlobUpload("test", buf, digest.String())
So(err, ShouldBeNil)
So(n, ShouldEqual, len(body))
So(u, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
})
Convey("New blob upload", func() {
v, err := il.NewBlobUpload("test")
upload, err := imgStore.NewBlobUpload("test")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
err = il.DeleteBlobUpload("test", v)
err = imgStore.DeleteBlobUpload("test", upload)
So(err, ShouldBeNil)
v, err = il.NewBlobUpload("test")
upload, err = imgStore.NewBlobUpload("test")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
Convey("Get blob upload", func() {
b, err := il.GetBlobUpload("test", "invalid")
bupload, err := imgStore.GetBlobUpload("test", "invalid")
So(err, ShouldNotBeNil)
So(b, ShouldEqual, -1)
So(bupload, ShouldEqual, -1)
b, err = il.GetBlobUpload("test", v)
bupload, err = imgStore.GetBlobUpload("test", upload)
So(err, ShouldBeNil)
So(b, ShouldBeGreaterThanOrEqualTo, 0)
So(bupload, ShouldBeGreaterThanOrEqualTo, 0)
b, err = il.BlobUploadInfo("test", v)
bupload, err = imgStore.BlobUploadInfo("test", upload)
So(err, ShouldBeNil)
So(b, ShouldBeGreaterThanOrEqualTo, 0)
So(bupload, ShouldBeGreaterThanOrEqualTo, 0)
content := []byte("test-data1")
firstChunkContent := []byte("test")
@ -206,281 +204,285 @@ func TestStorageAPIs(t *testing.T) {
secondChunkLen := secondChunkBuf.Len()
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
blobDigest := d
buflen := buf.Len()
digest := godigest.FromBytes(content)
blobDigest := digest
// invalid chunk range
_, err = il.PutBlobChunk("test", v, 10, int64(l), buf)
_, err = imgStore.PutBlobChunk("test", upload, 10, int64(buflen), buf)
So(err, ShouldNotBeNil)
b, err = il.PutBlobChunk("test", v, 0, int64(firstChunkLen), firstChunkBuf)
bupload, err = imgStore.PutBlobChunk("test", upload, 0, int64(firstChunkLen), firstChunkBuf)
So(err, ShouldBeNil)
So(b, ShouldEqual, firstChunkLen)
So(bupload, ShouldEqual, firstChunkLen)
b, err = il.GetBlobUpload("test", v)
bupload, err = imgStore.GetBlobUpload("test", upload)
So(err, ShouldBeNil)
So(b, ShouldEqual, int64(firstChunkLen))
So(bupload, ShouldEqual, int64(firstChunkLen))
b, err = il.BlobUploadInfo("test", v)
bupload, err = imgStore.BlobUploadInfo("test", upload)
So(err, ShouldBeNil)
So(b, ShouldEqual, int64(firstChunkLen))
So(bupload, ShouldEqual, int64(firstChunkLen))
b, err = il.PutBlobChunk("test", v, int64(firstChunkLen), int64(l), secondChunkBuf)
bupload, err = imgStore.PutBlobChunk("test", upload, int64(firstChunkLen), int64(buflen), secondChunkBuf)
So(err, ShouldBeNil)
So(b, ShouldEqual, secondChunkLen)
So(bupload, ShouldEqual, secondChunkLen)
err = il.FinishBlobUpload("test", v, buf, d.String())
err = imgStore.FinishBlobUpload("test", upload, buf, digest.String())
So(err, ShouldBeNil)
_, _, err = il.CheckBlob("test", d.String())
_, _, err = imgStore.CheckBlob("test", digest.String())
So(err, ShouldBeNil)
_, _, err = il.GetBlob("test", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
_, _, err = imgStore.GetBlob("test", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
m := ispec.Manifest{}
m.SchemaVersion = 2
mb, _ := json.Marshal(m)
manifest := ispec.Manifest{}
manifest.SchemaVersion = 2
manifestBuf, _ := json.Marshal(manifest)
Convey("Bad image manifest", func() {
_, err = il.PutImageManifest("test", d.String(), "application/json", mb)
_, err = imgStore.PutImageManifest("test", digest.String(), "application/json",
manifestBuf)
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte{})
_, err = imgStore.PutImageManifest("test", digest.String(), ispec.MediaTypeImageManifest,
[]byte{})
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte(`{"test":true}`))
_, err = imgStore.PutImageManifest("test", digest.String(), ispec.MediaTypeImageManifest,
[]byte(`{"test":true}`))
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest("test", digest.String(), ispec.MediaTypeImageManifest,
manifestBuf)
So(err, ShouldNotBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
_, _, _, err = imgStore.GetImageManifest("test", digest.String())
So(err, ShouldNotBeNil)
_, _, _, err = il.GetImageManifest("inexistent", d.String())
_, _, _, err = imgStore.GetImageManifest("inexistent", digest.String())
So(err, ShouldNotBeNil)
})
Convey("Good image manifest", func() {
annotationsMap := make(map[string]string)
annotationsMap[ispec.AnnotationRefName] = "1.0"
m := ispec.Manifest{
manifest := ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
},
Annotations: annotationsMap,
}
m.SchemaVersion = 2
mb, _ = json.Marshal(m)
d := godigest.FromBytes(mb)
manifest.SchemaVersion = 2
manifestBuf, _ = json.Marshal(manifest)
digest := godigest.FromBytes(manifestBuf)
// bad manifest
m.Layers[0].Digest = godigest.FromBytes([]byte("inexistent"))
badMb, _ := json.Marshal(m)
manifest.Layers[0].Digest = godigest.FromBytes([]byte("inexistent"))
badMb, _ := json.Marshal(manifest)
_, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, badMb)
_, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, badMb)
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
// same manifest for coverage
_, err = il.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
_, err = il.PutImageManifest("test", "2.0", ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest("test", "2.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
_, err := il.PutImageManifest("test", "3.0", ispec.MediaTypeImageManifest, mb)
_, err := imgStore.PutImageManifest("test", "3.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
_, err = il.GetImageTags("inexistent")
_, err = imgStore.GetImageTags("inexistent")
So(err, ShouldNotBeNil)
// total tags should be 3 but they have same reference.
tags, err := il.GetImageTags("test")
tags, err := imgStore.GetImageTags("test")
So(err, ShouldBeNil)
So(len(tags), ShouldEqual, 3)
_, _, _, err = il.GetImageManifest("test", d.String())
_, _, _, err = imgStore.GetImageManifest("test", digest.String())
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", "3.0")
_, _, _, err = imgStore.GetImageManifest("test", "3.0")
So(err, ShouldBeNil)
err = il.DeleteImageManifest("test", "1.0")
err = imgStore.DeleteImageManifest("test", "1.0")
So(err, ShouldBeNil)
tags, err = il.GetImageTags("test")
tags, err = imgStore.GetImageTags("test")
So(err, ShouldBeNil)
So(len(tags), ShouldEqual, 2)
// We deleted only one tag, make sure blob should not be removed.
hasBlob, _, err := il.CheckBlob("test", d.String())
hasBlob, _, err := imgStore.CheckBlob("test", digest.String())
So(err, ShouldBeNil)
So(hasBlob, ShouldEqual, true)
// If we pass reference all manifest with input reference should be deleted.
err = il.DeleteImageManifest("test", d.String())
err = imgStore.DeleteImageManifest("test", digest.String())
So(err, ShouldBeNil)
tags, err = il.GetImageTags("test")
tags, err = imgStore.GetImageTags("test")
So(err, ShouldBeNil)
So(len(tags), ShouldEqual, 0)
// All tags/references are deleted, blob should not be present in disk.
hasBlob, _, err = il.CheckBlob("test", d.String())
hasBlob, _, err = imgStore.CheckBlob("test", digest.String())
So(err, ShouldNotBeNil)
So(hasBlob, ShouldEqual, false)
err = il.DeleteBlob("test", "inexistent")
err = imgStore.DeleteBlob("test", "inexistent")
So(err, ShouldNotBeNil)
err = il.DeleteBlob("test", godigest.FromBytes([]byte("inexistent")).String())
err = imgStore.DeleteBlob("test", godigest.FromBytes([]byte("inexistent")).String())
So(err, ShouldNotBeNil)
err = il.DeleteBlob("test", blobDigest.String())
err = imgStore.DeleteBlob("test", blobDigest.String())
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
_, _, _, err = imgStore.GetImageManifest("test", digest.String())
So(err, ShouldNotBeNil)
})
})
err = il.DeleteBlobUpload("test", v)
err = imgStore.DeleteBlobUpload("test", upload)
So(err, ShouldNotBeNil)
})
Convey("New blob upload streamed", func() {
v, err := il.NewBlobUpload("test")
bupload, err := imgStore.NewBlobUpload("test")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(bupload, ShouldNotBeEmpty)
Convey("Get blob upload", func() {
err = il.FinishBlobUpload("test", v, bytes.NewBuffer([]byte{}), "inexistent")
err = imgStore.FinishBlobUpload("test", bupload, bytes.NewBuffer([]byte{}), "inexistent")
So(err, ShouldNotBeNil)
b, err := il.GetBlobUpload("test", "invalid")
upload, err := imgStore.GetBlobUpload("test", "invalid")
So(err, ShouldNotBeNil)
So(b, ShouldEqual, -1)
So(upload, ShouldEqual, -1)
b, err = il.GetBlobUpload("test", v)
upload, err = imgStore.GetBlobUpload("test", bupload)
So(err, ShouldBeNil)
So(b, ShouldBeGreaterThanOrEqualTo, 0)
So(upload, ShouldBeGreaterThanOrEqualTo, 0)
_, err = il.BlobUploadInfo("test", "inexistent")
_, err = imgStore.BlobUploadInfo("test", "inexistent")
So(err, ShouldNotBeNil)
b, err = il.BlobUploadInfo("test", v)
upload, err = imgStore.BlobUploadInfo("test", bupload)
So(err, ShouldBeNil)
So(b, ShouldBeGreaterThanOrEqualTo, 0)
So(upload, ShouldBeGreaterThanOrEqualTo, 0)
content := []byte("test-data2")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
b, err = il.PutBlobChunkStreamed("test", v, buf)
buflen := buf.Len()
digest := godigest.FromBytes(content)
upload, err = imgStore.PutBlobChunkStreamed("test", bupload, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(upload, ShouldEqual, buflen)
_, err = il.PutBlobChunkStreamed("test", "inexistent", buf)
_, err = imgStore.PutBlobChunkStreamed("test", "inexistent", buf)
So(err, ShouldNotBeNil)
err = il.FinishBlobUpload("test", "inexistent", buf, d.String())
err = imgStore.FinishBlobUpload("test", "inexistent", buf, digest.String())
So(err, ShouldNotBeNil)
err = il.FinishBlobUpload("test", v, buf, d.String())
err = imgStore.FinishBlobUpload("test", bupload, buf, digest.String())
So(err, ShouldBeNil)
_, _, err = il.CheckBlob("test", d.String())
_, _, err = imgStore.CheckBlob("test", digest.String())
So(err, ShouldBeNil)
_, _, err = il.GetBlob("test", "inexistent", "application/vnd.oci.image.layer.v1.tar+gzip")
_, _, err = imgStore.GetBlob("test", "inexistent", "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldNotBeNil)
_, _, err = il.GetBlob("test", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
_, _, err = imgStore.GetBlob("test", digest.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
blobContent, err := il.GetBlobContent("test", d.String())
blobContent, err := imgStore.GetBlobContent("test", digest.String())
So(err, ShouldBeNil)
So(content, ShouldResemble, blobContent)
_, err = il.GetBlobContent("inexistent", d.String())
_, err = imgStore.GetBlobContent("inexistent", digest.String())
So(err, ShouldNotBeNil)
m := ispec.Manifest{}
m.SchemaVersion = 2
mb, _ := json.Marshal(m)
manifest := ispec.Manifest{}
manifest.SchemaVersion = 2
manifestBuf, _ := json.Marshal(manifest)
Convey("Bad digests", func() {
_, _, err := il.FullBlobUpload("test", bytes.NewBuffer([]byte{}), "inexistent")
_, _, err := imgStore.FullBlobUpload("test", bytes.NewBuffer([]byte{}), "inexistent")
So(err, ShouldNotBeNil)
_, _, err = il.CheckBlob("test", "inexistent")
_, _, err = imgStore.CheckBlob("test", "inexistent")
So(err, ShouldNotBeNil)
})
Convey("Bad image manifest", func() {
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest("test", digest.String(), ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte("bad json"))
_, err = imgStore.PutImageManifest("test", digest.String(), ispec.MediaTypeImageManifest, []byte("bad json"))
So(err, ShouldNotBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
_, _, _, err = imgStore.GetImageManifest("test", digest.String())
So(err, ShouldNotBeNil)
})
Convey("Good image manifest", func() {
m := ispec.Manifest{
manifest := ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
},
}
m.SchemaVersion = 2
mb, _ = json.Marshal(m)
d := godigest.FromBytes(mb)
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
manifest.SchemaVersion = 2
manifestBuf, _ = json.Marshal(manifest)
digest := godigest.FromBytes(manifestBuf)
_, err = imgStore.PutImageManifest("test", digest.String(), ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
// same manifest for coverage
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
_, err = imgStore.PutImageManifest("test", digest.String(), ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
_, _, _, err = imgStore.GetImageManifest("test", digest.String())
So(err, ShouldBeNil)
_, err = il.GetIndexContent("inexistent")
_, err = imgStore.GetIndexContent("inexistent")
So(err, ShouldNotBeNil)
indexContent, err := il.GetIndexContent("test")
indexContent, err := imgStore.GetIndexContent("test")
So(err, ShouldBeNil)
if testcase.storageType == "fs" {
err = os.Chmod(path.Join(il.RootDir(), "test", "index.json"), 0000)
err = os.Chmod(path.Join(imgStore.RootDir(), "test", "index.json"), 0o000)
So(err, ShouldBeNil)
_, err = il.GetIndexContent("test")
_, err = imgStore.GetIndexContent("test")
So(err, ShouldNotBeNil)
err = os.Chmod(path.Join(il.RootDir(), "test", "index.json"), 0644)
err = os.Chmod(path.Join(imgStore.RootDir(), "test", "index.json"), 0o644)
So(err, ShouldBeNil)
}
@ -490,104 +492,104 @@ func TestStorageAPIs(t *testing.T) {
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 1)
err = il.DeleteImageManifest("test", "1.0")
err = imgStore.DeleteImageManifest("test", "1.0")
So(err, ShouldNotBeNil)
err = il.DeleteImageManifest("inexistent", "1.0")
err = imgStore.DeleteImageManifest("inexistent", "1.0")
So(err, ShouldNotBeNil)
err = il.DeleteImageManifest("test", d.String())
err = imgStore.DeleteImageManifest("test", digest.String())
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
_, _, _, err = imgStore.GetImageManifest("test", digest.String())
So(err, ShouldNotBeNil)
})
})
err = il.DeleteBlobUpload("test", v)
err = imgStore.DeleteBlobUpload("test", bupload)
So(err, ShouldNotBeNil)
})
Convey("Modify manifest in-place", func() {
// original blob
v, err := il.NewBlobUpload("replace")
upload, err := imgStore.NewBlobUpload("replace")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
content := []byte("test-data-replace-1")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
b, err := il.PutBlobChunkStreamed("replace", v, buf)
buflen := buf.Len()
digest := godigest.FromBytes(content)
blob, err := imgStore.PutBlobChunkStreamed("replace", upload, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
blobDigest1 := strings.Split(d.String(), ":")[1]
So(blob, ShouldEqual, buflen)
blobDigest1 := strings.Split(digest.String(), ":")[1]
So(blobDigest1, ShouldNotBeEmpty)
err = il.FinishBlobUpload("replace", v, buf, d.String())
err = imgStore.FinishBlobUpload("replace", upload, buf, digest.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
m := ispec.Manifest{}
m.SchemaVersion = 2
m = ispec.Manifest{
manifest := ispec.Manifest{}
manifest.SchemaVersion = 2
manifest = ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
},
}
m.SchemaVersion = 2
mb, _ := json.Marshal(m)
d = godigest.FromBytes(mb)
_, err = il.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, mb)
manifest.SchemaVersion = 2
manifestBuf, _ := json.Marshal(manifest)
digest = godigest.FromBytes(manifestBuf)
_, err = imgStore.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("replace", d.String())
_, _, _, err = imgStore.GetImageManifest("replace", digest.String())
So(err, ShouldBeNil)
// new blob to replace
v, err = il.NewBlobUpload("replace")
upload, err = imgStore.NewBlobUpload("replace")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
So(upload, ShouldNotBeEmpty)
content = []byte("test-data-replace-2")
buf = bytes.NewBuffer(content)
l = buf.Len()
d = godigest.FromBytes(content)
b, err = il.PutBlobChunkStreamed("replace", v, buf)
buflen = buf.Len()
digest = godigest.FromBytes(content)
blob, err = imgStore.PutBlobChunkStreamed("replace", upload, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
blobDigest2 := strings.Split(d.String(), ":")[1]
So(blob, ShouldEqual, buflen)
blobDigest2 := strings.Split(digest.String(), ":")[1]
So(blobDigest2, ShouldNotBeEmpty)
err = il.FinishBlobUpload("replace", v, buf, d.String())
err = imgStore.FinishBlobUpload("replace", upload, buf, digest.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
So(blob, ShouldEqual, buflen)
m = ispec.Manifest{
manifest = ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
Digest: digest,
Size: int64(buflen),
},
},
}
m.SchemaVersion = 2
mb, _ = json.Marshal(m)
_ = godigest.FromBytes(mb)
_, err = il.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, mb)
manifest.SchemaVersion = 2
manifestBuf, _ = json.Marshal(manifest)
_ = godigest.FromBytes(manifestBuf)
_, err = imgStore.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, manifestBuf)
So(err, ShouldBeNil)
})
@ -598,15 +600,15 @@ func TestStorageAPIs(t *testing.T) {
wg.Add(2)
go func() {
defer wg.Done()
il.Lock()
imgStore.Lock()
func() {}()
il.Unlock()
imgStore.Unlock()
}()
go func() {
defer wg.Done()
il.RLock()
imgStore.RLock()
func() {}()
il.RUnlock()
imgStore.RUnlock()
}()
}
wg.Wait()
@ -690,17 +692,17 @@ func TestStorageHandler(t *testing.T) {
storeController.SubStore = subStore
is := storeController.GetImageStore("zot-x-test")
So(is.RootDir(), ShouldEqual, firstRootDir)
imgStore := storeController.GetImageStore("zot-x-test")
So(imgStore.RootDir(), ShouldEqual, firstRootDir)
is = storeController.GetImageStore("a/zot-a-test")
So(is.RootDir(), ShouldEqual, secondRootDir)
imgStore = storeController.GetImageStore("a/zot-a-test")
So(imgStore.RootDir(), ShouldEqual, secondRootDir)
is = storeController.GetImageStore("b/zot-b-test")
So(is.RootDir(), ShouldEqual, thirdRootDir)
imgStore = storeController.GetImageStore("b/zot-b-test")
So(imgStore.RootDir(), ShouldEqual, thirdRootDir)
is = storeController.GetImageStore("c/zot-c-test")
So(is.RootDir(), ShouldEqual, firstRootDir)
imgStore = storeController.GetImageStore("c/zot-c-test")
So(imgStore.RootDir(), ShouldEqual, firstRootDir)
})
})
}

View File

@ -1,7 +1,7 @@
build:
from:
type: docker
url: docker://golang:1.16
url: docker://ghcr.io/project-zot/golang:1.16
binds:
- ${{PWD}} -> /zotcopy
run: |

View File

@ -38,22 +38,23 @@ func GetSecureBaseURL(port string) string {
// MakeHtpasswdFile creates a temporary htpasswd file populated with the
// default test credentials and returns its path. The caller owns the file
// and should remove it when done.
func MakeHtpasswdFile() string {
	// bcrypt(username="test", passwd="test")
	return MakeHtpasswdFileFromString("test:$2y$05$hlbSXDp6hzDLu6VwACS39ORvVRpr3OMR4RlJ31jtlaOEGnPjKZI1m\n")
}
// MakeHtpasswdFileFromString writes fileContent into a fresh temporary
// htpasswd file and returns the file's path. The caller owns the file and
// should remove it when done. This is a test-only helper, so any I/O
// failure panics rather than returning an error.
func MakeHtpasswdFileFromString(fileContent string) string {
	htpasswdFile, err := ioutil.TempFile("", "htpasswd-")
	if err != nil {
		panic(err)
	}
	// Close the handle from TempFile right away; WriteFile below opens the
	// path itself, and leaving this descriptor open would leak it.
	defer htpasswdFile.Close()

	content := []byte(fileContent)
	if err := ioutil.WriteFile(htpasswdFile.Name(), content, 0o600); err != nil { //nolint:gomnd
		panic(err)
	}

	return htpasswdFile.Name()
}
func Location(baseURL string, resp *resty.Response) string {
@ -63,22 +64,23 @@ func Location(baseURL string, resp *resty.Response) string {
// zot implements the latter as per the spec, but some registries appear to
// return the former - this needs to be clarified
loc := resp.Header().Get("Location")
return baseURL + loc
}
func CopyFiles(sourceDir string, destDir string) error {
sourceMeta, err := os.Stat(sourceDir)
if err != nil {
return err
return fmt.Errorf("CopyFiles os.Stat failed: %w", err)
}
if err := os.MkdirAll(destDir, sourceMeta.Mode()); err != nil {
return err
return fmt.Errorf("CopyFiles os.MkdirAll failed: %w", err)
}
files, err := ioutil.ReadDir(sourceDir)
if err != nil {
return err
return fmt.Errorf("CopyFiles ioutil.ReadDir failed: %w", err)
}
for _, file := range files {
@ -92,18 +94,18 @@ func CopyFiles(sourceDir string, destDir string) error {
} else {
sourceFile, err := os.Open(sourceFilePath)
if err != nil {
return err
return fmt.Errorf("CopyFiles os.Open failed: %w", err)
}
defer sourceFile.Close()
destFile, err := os.Create(destFilePath)
if err != nil {
return err
return fmt.Errorf("CopyFiles os.Create failed: %w", err)
}
defer destFile.Close()
if _, err = io.Copy(destFile, sourceFile); err != nil {
return err
return fmt.Errorf("io.Copy failed: %w", err)
}
}
}

View File

@ -40,7 +40,7 @@ func TestCopyFiles(t *testing.T) {
}
defer os.RemoveAll(dir)
err = os.Chmod(dir, 0300)
err = os.Chmod(dir, 0o300)
So(err, ShouldBeNil)
err = CopyFiles(dir, os.TempDir())
@ -54,7 +54,7 @@ func TestCopyFiles(t *testing.T) {
defer os.RemoveAll(dir)
sdir := "subdir"
err = os.Mkdir(path.Join(dir, sdir), 0300)
err = os.Mkdir(path.Join(dir, sdir), 0o300)
So(err, ShouldBeNil)
err = CopyFiles(dir, os.TempDir())
@ -68,12 +68,12 @@ func TestCopyFiles(t *testing.T) {
defer os.RemoveAll(dir)
filePath := path.Join(dir, "file.txt")
err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0644) //nolint: gosec
err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0o644) //nolint: gosec
if err != nil {
panic(err)
}
err = os.Chmod(filePath, 0300)
err = os.Chmod(filePath, 0o300)
So(err, ShouldBeNil)
err = CopyFiles(dir, os.TempDir())