chore: bump golangci-lint to 1.57.0

Fix all discovered issues.

Signed-off-by: Dmitriy Matrenichev <dmitry.matrenichev@siderolabs.com>

Parent: 6840119632
Commit: 19f15a840c
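Most of the Go changes in this diff follow two mechanical patterns that Go 1.22 enables and that golangci-lint 1.57 now reports: counted loops such as `for i := 0; i < n; i++` become integer ranges (`for i := range n`, or `for range n` when the index is unused), and per-iteration loop-variable copies such as `x := x` are deleted because loop variables are now scoped per iteration (the copyloopvar check referenced by the //nolint:copyloopvar comments below). A minimal, self-contained sketch of both rewrites; the names are illustrative and not taken from the Talos code base:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 3
	items := []string{"a", "b", "c"}

	var wg sync.WaitGroup

	// Go 1.22 form used throughout the diff: range over an integer
	// instead of a counted `for i := 0; i < workers; i++` loop.
	for i := range workers {
		wg.Add(1)

		go func() {
			defer wg.Done()
			// The loop variable is per-iteration since Go 1.22,
			// so no `i := i` copy is needed before capturing it.
			fmt.Println("worker", i)
		}()
	}

	// Likewise, the `item := item` copies removed below are no longer
	// required before capturing a range variable in a closure.
	for _, item := range items {
		wg.Add(1)

		go func() {
			defer wg.Done()
			fmt.Println("item", item)
		}()
	}

	wg.Wait()
}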
@@ -15,13 +15,6 @@ run:
  # include test files or not, default is true
  tests: true

-   # which files to skip: they will be analyzed, but issues from them
-   # won't be reported. Default value is empty list, but there is
-   # no need to include all autogenerated files, we confidently recognize
-   # autogenerated files. If it's not please let us know.
-   skip-files:
-     - .*\\.pb\\.go$
-
  # list of build tags, all linters use it. Default is empty list.
  build-tags:
    - integration
@@ -33,7 +26,12 @@ run:
# output configuration options
output:
-   # colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
-   format: line-number
+   formats:
+     - format: line-number
+       path: stdout
  print-issued-lines: true
  print-linter-name: true
  uniq-by-line: true
  sort-results: true

# all available settings of specific linters
@@ -46,12 +44,7 @@ linters-settings:
    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
    # default is false: such cases aren't reported by default.
    check-blank: true
-   govet:
-     # report about shadowed variables
-     check-shadowing: true
  gofmt:
    # simplify code: gofmt with `-s` option, true by default
    simplify: true
+   govet: { }
  gocyclo:
    # minimal code complexity to report, 30 by default (but we recommend 10-20)
    min-complexity: 10
@@ -79,6 +72,8 @@ linters-settings:
    allow-no-explanation: []
    require-explanation: false
    require-specific: true
+   unused:
+     local-variables-are-used: false
  prealloc:
    # XXX: we don't recommend using this linter before doing performance profiling.
    # For most programs usage of prealloc will be a premature optimization.
@@ -166,6 +161,9 @@ issues:
    - ^ST1000 # ST1000: at least one file in a package should have a package comment (stylecheck)
    - parameter '\w+' seems to be unused, consider removing or renaming it as _ # noisy check, especially when the usage is an interface implementation

+   exclude-files:
+     - .*\\.pb\\.go$

  exclude-rules:
    - path: cmd/talosctl/cmd
      linters:
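Taken together, the .golangci.yml hunks above follow the configuration-schema changes in golangci-lint 1.57: run.skip-files moves to issues.exclude-files, the single output.format key becomes the output.formats list, and govet's deprecated check-shadowing setting is dropped. A condensed sketch of the resulting sections (abbreviated; assumes the rest of the file is unchanged):

run:
  tests: true
  build-tags:
    - integration

output:
  formats:
    - format: line-number
      path: stdout

issues:
  exclude-files:
    - .*\\.pb\\.go$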
@@ -938,6 +938,7 @@ FROM base AS lint-go
COPY .golangci.yml .
ENV GOGC 50
ENV GOLANGCI_LINT_CACHE /.cache/lint
+ RUN golangci-lint config verify --config .golangci.yml
RUN --mount=type=cache,target=/.cache golangci-lint run --config .golangci.yml
WORKDIR /src/pkg/machinery
RUN --mount=type=cache,target=/.cache golangci-lint run --config ../../.golangci.yml
Makefile
@@ -55,7 +55,7 @@ GOIMPORTS_VERSION ?= v0.19.0
# renovate: datasource=go depName=mvdan.cc/gofumpt
GOFUMPT_VERSION ?= v0.6.0
# renovate: datasource=go depName=github.com/golangci/golangci-lint
- GOLANGCILINT_VERSION ?= v1.56.2
+ GOLANGCILINT_VERSION ?= v1.57.0
# renovate: datasource=go depName=golang.org/x/tools
STRINGER_VERSION ?= v0.19.0
# renovate: datasource=go depName=github.com/dmarkham/enumer
@@ -200,8 +200,6 @@ func (m *Manifest) checkMounts(device Device) error {
}

for _, path := range matches {
- path := path
-
if err = func() error {
var f *os.File

@@ -390,8 +388,6 @@ func (m *Manifest) executeOnDevice(device Device, targets []*Target) (err error)
}

for _, target := range targets {
- target := target
-
err = retry.Constant(time.Minute, retry.WithUnits(100*time.Millisecond)).Retry(func() error {
e := target.Format(m.Printf)
if e != nil {
@@ -641,7 +641,7 @@ func create(ctx context.Context, flags *pflag.FlagSet) error {
endpointList = []string{ips[0][0].String()}
default:
// use control plane nodes as endpoints, client-side load-balancing
- for i := 0; i < controlplanes; i++ {
+ for i := range controlplanes {
endpointList = append(endpointList, ips[0][i].String())
}
}
@@ -750,7 +750,7 @@ func create(ctx context.Context, flags *pflag.FlagSet) error {
provisionOptions = append(provisionOptions, provision.WithTalosConfig(configBundle.TalosConfig()))

// Create the controlplane nodes.
- for i := 0; i < controlplanes; i++ {
+ for i := range controlplanes {
var cfg config.Provider

nodeIPs := make([]netip.Addr, len(cidrs))
@@ -796,7 +796,7 @@ func create(ctx context.Context, flags *pflag.FlagSet) error {
}

// append extra disks
- for i := 0; i < extraDisks; i++ {
+ for range extraDisks {
disks = append(disks, &provision.Disk{
Size: uint64(extraDiskSize) * 1024 * 1024,
SkipPreallocate: !clusterDiskPreallocate,
@@ -90,7 +90,6 @@ Certificate expires: 10 years from now (2031-07-03)
}

for _, tc := range testCases {
- tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()

@@ -69,13 +69,12 @@ func containerRender(remotePeer *peer.Peer, resp *machineapi.ContainersResponse)
defaultNode := client.AddrFromPeer(remotePeer)

for _, msg := range resp.Messages {
- resp := msg
- sort.Slice(resp.Containers,
+ sort.Slice(msg.Containers,
func(i, j int) bool {
- return strings.Compare(resp.Containers[i].Id, resp.Containers[j].Id) < 0
+ return strings.Compare(msg.Containers[i].Id, msg.Containers[j].Id) < 0
})

- for _, p := range resp.Containers {
+ for _, p := range msg.Containers {
display := p.Id
if p.Id != p.PodId {
// container in a sandbox
@@ -84,8 +83,8 @@ func containerRender(remotePeer *peer.Peer, resp *machineapi.ContainersResponse)

node := defaultNode

- if resp.Metadata != nil {
- node = resp.Metadata.Hostname
+ if msg.Metadata != nil {
+ node = msg.Metadata.Hostname
}

fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%s\n", node, p.Namespace, display, p.Image, p.Pid, p.Status)
@@ -85,7 +85,7 @@ var logsCmd = &cobra.Command{
continue
}

- node := defaultNode
+ node := defaultNode //nolint:copyloopvar
if data.Metadata != nil && data.Metadata.Hostname != "" {
node = data.Metadata.Hostname
}
@@ -69,13 +69,12 @@ func statsRender(remotePeer *peer.Peer, resp *machineapi.StatsResponse) error {
defaultNode := client.AddrFromPeer(remotePeer)

for _, msg := range resp.Messages {
- resp := msg
- sort.Slice(resp.Stats,
+ sort.Slice(msg.Stats,
func(i, j int) bool {
- return strings.Compare(resp.Stats[i].Id, resp.Stats[j].Id) < 0
+ return strings.Compare(msg.Stats[i].Id, msg.Stats[j].Id) < 0
})

- for _, s := range resp.Stats {
+ for _, s := range msg.Stats {
display := s.Id
if s.Id != s.PodId {
// container in a sandbox
@@ -84,8 +83,8 @@ func statsRender(remotePeer *peer.Peer, resp *machineapi.StatsResponse) error {

node := defaultNode

- if resp.Metadata != nil {
- node = resp.Metadata.Hostname
+ if msg.Metadata != nil {
+ node = msg.Metadata.Hostname
}

fmt.Fprintf(w, "%s\t%s\t%s\t%.2f\t%d\n", node, s.Namespace, display, float64(s.MemoryUsage)*1e-6, s.CpuUsage)
@@ -163,8 +163,6 @@ func (a *Tracker) Run() error {
var trackEg errgroup.Group

for _, node := range a.cliContext.Nodes {
- node := node
-
var (
dmesg *circular.Buffer
err error
@@ -20,8 +20,6 @@ func TestComments(t *testing.T) {
require.NoError(t, err)

for _, path := range testCases {
- path := path
-
t.Run(filepath.Base(path), func(t *testing.T) {
in, err := os.ReadFile(path)
require.NoError(t, err)
@@ -89,9 +89,6 @@ func (au *AWSUploader) RegisterAMIs(ctx context.Context) error {
g, ctx = errgroup.WithContext(ctx)

for region, svc := range au.ec2svcs {
- region := region
- svc := svc
-
g.Go(func() error {
err := au.registerAMI(ctx, region, svc)
if err != nil {
@@ -147,8 +144,6 @@ func (au *AWSUploader) registerAMI(ctx context.Context, region string, svc *ec2.
var g errgroup.Group

for _, arch := range au.Options.Architectures {
- arch := arch
-
g.Go(func() error {
err = au.registerAMIArch(ctx, region, svc, arch, bucketName, uploader)
if err != nil {
@@ -102,8 +102,6 @@ func (azu *AzureUploader) AzureGalleryUpload(ctx context.Context) error {
log.Printf("azure: uploading blobs for architectures: %+v\n", azu.Options.Architectures)

for _, arch := range azu.Options.Architectures {
- arch := arch
-
g.Go(func() error {
log.Printf("azure: starting upload blob for %s\n", arch)

@@ -190,7 +188,7 @@ func (azu *AzureUploader) uploadAzureBlob(ctx context.Context, arch string) erro
var g *errgroup.Group
g, ctx = errgroup.WithContext(ctx)

- for i := 0; i < concurrency; i++ {
+ for range concurrency {
g.Go(func() error {
for w := range workCh {
_, err = pageBlobClient.UploadPages(
@@ -24,8 +24,6 @@ func TestRun(t *testing.T) {
}

for name, test := range tests {
- test := test
-
t.Run(name, func(t *testing.T) {
tempDir := t.TempDir()

@@ -48,7 +48,7 @@ func (suite *APIDFactorySuite) TestGetConcurrent() {

backendCh := make(chan proxy.Backend, 10)

- for i := 0; i < 10; i++ {
+ for range 10 {
wg.Add(1)

go func() {
@ -214,11 +214,11 @@ func TestAPIIdiosyncrasies(t *testing.T) {
|
||||
storage.File_storage_storage_proto.Services(),
|
||||
time.File_time_time_proto.Services(),
|
||||
} {
|
||||
for i := 0; i < services.Len(); i++ {
|
||||
for i := range services.Len() {
|
||||
service := services.Get(i)
|
||||
methods := service.Methods()
|
||||
|
||||
for j := 0; j < methods.Len(); j++ {
|
||||
for j := range methods.Len() {
|
||||
method := methods.Get(j)
|
||||
|
||||
t.Run(
|
||||
@ -320,7 +320,7 @@ func testEnum(t *testing.T, enum protoreflect.EnumDescriptor, currentVersion *co
|
||||
testDeprecated(t, enum, currentVersion)
|
||||
|
||||
values := enum.Values()
|
||||
for i := 0; i < values.Len(); i++ {
|
||||
for i := range values.Len() {
|
||||
testDeprecated(t, values.Get(i), currentVersion)
|
||||
}
|
||||
}
|
||||
@ -329,23 +329,23 @@ func testMessage(t *testing.T, message protoreflect.MessageDescriptor, currentVe
|
||||
testDeprecated(t, message, currentVersion)
|
||||
|
||||
fields := message.Fields()
|
||||
for i := 0; i < fields.Len(); i++ {
|
||||
for i := range fields.Len() {
|
||||
testDeprecated(t, fields.Get(i), currentVersion)
|
||||
}
|
||||
|
||||
oneofs := message.Oneofs()
|
||||
for i := 0; i < oneofs.Len(); i++ {
|
||||
for i := range oneofs.Len() {
|
||||
testDeprecated(t, oneofs.Get(i), currentVersion)
|
||||
}
|
||||
|
||||
enums := message.Enums()
|
||||
for i := 0; i < enums.Len(); i++ {
|
||||
for i := range enums.Len() {
|
||||
testEnum(t, enums.Get(i), currentVersion)
|
||||
}
|
||||
|
||||
// test nested messages
|
||||
messages := message.Messages()
|
||||
for i := 0; i < messages.Len(); i++ {
|
||||
for i := range messages.Len() {
|
||||
testMessage(t, messages.Get(i), currentVersion)
|
||||
}
|
||||
}
|
||||
@ -364,22 +364,22 @@ func TestDeprecatedAPIs(t *testing.T) {
|
||||
time.File_time_time_proto,
|
||||
} {
|
||||
enums := file.Enums()
|
||||
for i := 0; i < enums.Len(); i++ {
|
||||
for i := range enums.Len() {
|
||||
testEnum(t, enums.Get(i), currentVersion)
|
||||
}
|
||||
|
||||
messages := file.Messages()
|
||||
for i := 0; i < messages.Len(); i++ {
|
||||
for i := range messages.Len() {
|
||||
testMessage(t, messages.Get(i), currentVersion)
|
||||
}
|
||||
|
||||
services := file.Services()
|
||||
for i := 0; i < services.Len(); i++ {
|
||||
for i := range services.Len() {
|
||||
service := services.Get(i)
|
||||
testDeprecated(t, service, currentVersion)
|
||||
|
||||
methods := service.Methods()
|
||||
for j := 0; j < methods.Len(); j++ {
|
||||
for j := range methods.Len() {
|
||||
method := methods.Get(j)
|
||||
testDeprecated(t, method, currentVersion)
|
||||
|
||||
|
@ -116,8 +116,6 @@ func TestPeerStatus_CalculateState(t *testing.T) {
|
||||
expectedState: kubespan.PeerStateUp,
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
peerStatus := kubespan.PeerStatusSpec{
|
||||
LastHandshakeTime: time.Now().Add(-tt.sinceLastHandshake),
|
||||
|
@ -78,8 +78,6 @@ func (ctrl *AffiliateMergeController) Run(ctx context.Context, r controller.Runt
|
||||
touchedIDs := make(map[resource.ID]struct{}, len(mergedAffiliates))
|
||||
|
||||
for id, affiliateSpec := range mergedAffiliates {
|
||||
affiliateSpec := affiliateSpec
|
||||
|
||||
if err = safe.WriterModify(ctx, r, cluster.NewAffiliate(cluster.NamespaceName, id), func(res *cluster.Affiliate) error {
|
||||
*res.TypedSpec() = *affiliateSpec
|
||||
|
||||
|
@ -305,8 +305,6 @@ func (ctrl *DiscoveryServiceController) Run(ctx context.Context, r controller.Ru
|
||||
for _, discoveredAffiliate := range client.GetAffiliates() {
|
||||
id := fmt.Sprintf("service/%s", discoveredAffiliate.Affiliate.NodeId)
|
||||
|
||||
discoveredAffiliate := discoveredAffiliate
|
||||
|
||||
if err = safe.WriterModify(ctx, r, cluster.NewAffiliate(cluster.RawNamespaceName, id), func(res *cluster.Affiliate) error {
|
||||
*res.TypedSpec() = specAffiliate(discoveredAffiliate.Affiliate, discoveredAffiliate.Endpoints)
|
||||
|
||||
|
@ -156,8 +156,6 @@ func (ctrl *KubernetesPullController) Run(ctx context.Context, r controller.Runt
|
||||
for _, affilateSpec := range affiliateSpecs {
|
||||
id := fmt.Sprintf("k8s/%s", affilateSpec.NodeID)
|
||||
|
||||
affilateSpec := affilateSpec
|
||||
|
||||
if err = safe.WriterModify(ctx, r, cluster.NewAffiliate(cluster.RawNamespaceName, id), func(res *cluster.Affiliate) error {
|
||||
*res.TypedSpec() = *affilateSpec
|
||||
|
||||
|
@ -303,8 +303,6 @@ func TestNewKubeletConfigurationFail(t *testing.T) {
|
||||
expectedErr: "2 errors occurred:\n\t* field \"authentication\" can't be overridden\n\t* field \"port\" can't be overridden\n\n",
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(
|
||||
tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
@ -460,8 +458,6 @@ func TestNewKubeletConfigurationMerge(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -194,8 +194,6 @@ func (ctrl *KubeletStaticPodController) refreshPodStatus(ctx context.Context, r
|
||||
podsSeen := map[string]struct{}{}
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
pod := pod
|
||||
|
||||
switch pod.Metadata.Annotations.ConfigSource {
|
||||
case "file":
|
||||
// static pod from a file source
|
||||
|
@ -107,8 +107,6 @@ func (ctrl *ManifestController) Run(ctx context.Context, r controller.Runtime, l
|
||||
}
|
||||
|
||||
for _, renderedManifest := range renderedManifests {
|
||||
renderedManifest := renderedManifest
|
||||
|
||||
if err = safe.WriterModify(ctx, r, k8s.NewManifest(k8s.ControlPlaneNamespaceName, renderedManifest.name),
|
||||
func(r *k8s.Manifest) error {
|
||||
return k8sadapter.Manifest(r).SetYAML(renderedManifest.data)
|
||||
|
@ -112,8 +112,6 @@ func TestApplyLabels(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@ -297,8 +295,6 @@ func TestApplyTaints(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@ -395,8 +391,6 @@ func TestApplyCordoned(t *testing.T) {
|
||||
expectedAnnotations: map[string]string{"foo": "bar"},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -230,7 +230,7 @@ func (t *CRDController) Run(ctx context.Context, workers int) error {
|
||||
|
||||
wg.Add(workers)
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
for range workers {
|
||||
go func() {
|
||||
wait.Until(func() { t.runWorker(ctx) }, time.Second, ctx.Done())
|
||||
wg.Done()
|
||||
|
@ -339,8 +339,6 @@ func (ctrl *ManagerController) Run(ctx context.Context, r controller.Runtime, lo
|
||||
|
||||
// update peer statuses
|
||||
for pubKey, peerStatus := range peerStatuses {
|
||||
peerStatus := peerStatus
|
||||
|
||||
if err = safe.WriterModify(ctx, r,
|
||||
kubespan.NewPeerStatus(
|
||||
kubespan.NamespaceName,
|
||||
@ -418,8 +416,6 @@ func (ctrl *ManagerController) Run(ctx context.Context, r controller.Runtime, lo
|
||||
ConfigLayer: network.ConfigOperator,
|
||||
},
|
||||
} {
|
||||
spec := spec
|
||||
|
||||
if err = safe.WriterModify(ctx, r,
|
||||
network.NewRouteSpec(
|
||||
network.ConfigNamespaceName,
|
||||
|
@ -219,8 +219,6 @@ func (suite *ManagerSuite) TestReconcile() {
|
||||
)
|
||||
|
||||
for _, peer := range []*kubespan.PeerSpec{peer1, peer2} {
|
||||
peer := peer
|
||||
|
||||
ctest.AssertResource(suite,
|
||||
peer.Metadata().ID(),
|
||||
func(res *kubespan.PeerStatus, asrt *assert.Assertions) {
|
||||
@ -326,8 +324,6 @@ func (suite *ManagerSuite) TestReconcile() {
|
||||
)
|
||||
|
||||
for _, peer := range []*kubespan.PeerSpec{peer1, peer2} {
|
||||
peer := peer
|
||||
|
||||
ctest.AssertResource(suite,
|
||||
peer.Metadata().ID(),
|
||||
func(res *kubespan.PeerStatus, asrt *assert.Assertions) {
|
||||
|
@ -160,7 +160,6 @@ func (ctrl *AddressConfigController) apply(ctx context.Context, r controller.Run
|
||||
ids := make([]string, 0, len(addresses))
|
||||
|
||||
for _, address := range addresses {
|
||||
address := address
|
||||
id := network.LayeredID(address.ConfigLayer, network.AddressID(address.LinkName, address.Address))
|
||||
|
||||
if err := r.Modify(
|
||||
|
@ -89,8 +89,6 @@ func (ctrl *AddressMergeController) Run(ctx context.Context, r controller.Runtim
|
||||
conflictsDetected := 0
|
||||
|
||||
for id, address := range addresses {
|
||||
address := address
|
||||
|
||||
if err = r.Modify(ctx, network.NewAddressSpec(network.NamespaceName, id), func(res resource.Resource) error {
|
||||
addr := res.(*network.AddressSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
|
@ -189,7 +189,7 @@ func (suite *AddressMergeSuite) TestMergeFlapping() {
|
||||
|
||||
flipflop := func(idx int) func() error {
|
||||
return func() error {
|
||||
for i := 0; i < 500; i++ {
|
||||
for range 500 {
|
||||
if err := suite.state.Create(suite.ctx, resources[idx]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -212,7 +212,7 @@ func (suite *AddressMergeSuite) TestMergeFlapping() {
|
||||
eg.Go(
|
||||
func() error {
|
||||
// add/remove finalizer to the merged resource
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
if err := suite.state.AddFinalizer(
|
||||
suite.ctx,
|
||||
resource.NewMetadata(
|
||||
|
@ -302,7 +302,7 @@ func broadcastAddr(addr netip.Prefix) net.IP {
|
||||
|
||||
out := make(net.IP, n)
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
for i := range n {
|
||||
out[i] = ip[i] | ^mask[i]
|
||||
}
|
||||
|
||||
|
@ -88,15 +88,12 @@ func (ctrl *AddressStatusController) Run(ctx context.Context, r controller.Runti
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
addr := addr
|
||||
|
||||
// TODO: should we use local address actually?
|
||||
// from if_addr.h:
|
||||
// IFA_ADDRESS is prefix address, rather than local interface address.
|
||||
// * It makes no difference for normally configured broadcast interfaces,
|
||||
// * but for point-to-point IFA_ADDRESS is DESTINATION address,
|
||||
// * local address is supplied in IFA_LOCAL attribute.
|
||||
|
||||
ipAddr, _ := netip.AddrFromSlice(addr.Attributes.Address)
|
||||
ipPrefix := netip.PrefixFrom(ipAddr, int(addr.PrefixLength))
|
||||
id := network.AddressID(linkLookup[addr.Index], ipPrefix)
|
||||
|
@ -472,8 +472,6 @@ func (suite *CmdlineSuite) TestParse() {
|
||||
},
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
|
||||
suite.Run(test.name, func() {
|
||||
cmdline := procfs.NewCmdline(test.cmdline)
|
||||
|
||||
|
@ -210,8 +210,6 @@ func (suite *EtcFileConfigSuite) testFiles(resources []resource.Resource, conten
|
||||
)
|
||||
|
||||
for _, id := range unexpectedIDs {
|
||||
id := id
|
||||
|
||||
assertNoResource[*files.EtcFileSpec](suite.ctx, suite.T(), suite.state, id)
|
||||
}
|
||||
}
|
||||
|
@ -181,7 +181,6 @@ func (ctrl *HostnameConfigController) apply(ctx context.Context, r controller.Ru
|
||||
ids := make([]string, 0, len(specs))
|
||||
|
||||
for _, spec := range specs {
|
||||
spec := spec
|
||||
id := network.LayeredID(spec.ConfigLayer, network.HostnameID)
|
||||
|
||||
if err := r.Modify(
|
||||
|
@ -52,7 +52,7 @@ func TestProbeHTTP(t *testing.T) {
|
||||
t.Cleanup(p.Stop)
|
||||
|
||||
// probe should always succeed
|
||||
for i := 0; i < 3; i++ {
|
||||
for range 3 {
|
||||
assert.Equal(t, probe.Notification{
|
||||
ID: "test",
|
||||
Status: network.ProbeStatusSpec{
|
||||
@ -124,7 +124,7 @@ func TestProbeConsecutiveFailures(t *testing.T) {
|
||||
// stop the test server, probe should fail
|
||||
server.Close()
|
||||
|
||||
for i := 0; i < p.Spec.FailureThreshold-1; i++ {
|
||||
for range p.Spec.FailureThreshold - 1 {
|
||||
// probe should fail, but no notification should be sent yet (failure threshold not reached)
|
||||
mockClock.Add(p.Spec.Interval)
|
||||
|
||||
|
@ -234,7 +234,6 @@ func (ctrl *LinkConfigController) apply(ctx context.Context, r controller.Runtim
|
||||
ids := make([]string, 0, len(links))
|
||||
|
||||
for _, link := range links {
|
||||
link := link
|
||||
id := network.LayeredID(link.ConfigLayer, network.LinkID(link.Name))
|
||||
|
||||
if err := r.Modify(
|
||||
|
@ -103,8 +103,6 @@ func (ctrl *LinkMergeController) Run(ctx context.Context, r controller.Runtime,
|
||||
conflictsDetected := 0
|
||||
|
||||
for id, link := range links {
|
||||
link := link
|
||||
|
||||
if err = r.Modify(ctx, network.NewLinkSpec(network.NamespaceName, id), func(res resource.Resource) error {
|
||||
l := res.(*network.LinkSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
|
@ -213,7 +213,7 @@ func (suite *LinkMergeSuite) TestMergeFlapping() {
|
||||
|
||||
flipflop := func(idx int) func() error {
|
||||
return func() error {
|
||||
for i := 0; i < 500; i++ {
|
||||
for range 500 {
|
||||
if err := suite.state.Create(suite.ctx, resources[idx]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -236,7 +236,7 @@ func (suite *LinkMergeSuite) TestMergeFlapping() {
|
||||
eg.Go(
|
||||
func() error {
|
||||
// add/remove finalizer to the merged resource
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
if err := suite.state.AddFinalizer(
|
||||
suite.ctx,
|
||||
resource.NewMetadata(
|
||||
|
@ -537,10 +537,13 @@ func (suite *LinkSpecSuite) TestBond8023ad() {
|
||||
}
|
||||
networkadapter.BondMasterSpec(&bond.TypedSpec().BondMaster).FillDefaults()
|
||||
|
||||
dummies := []resource.Resource{}
|
||||
dummyNames := []string{}
|
||||
//nolint:prealloc
|
||||
var (
|
||||
dummies []resource.Resource
|
||||
dummyNames []string
|
||||
)
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
for range 4 {
|
||||
dummyName := suite.uniqueDummyInterface()
|
||||
dummy := network.NewLinkSpec(network.NamespaceName, dummyName)
|
||||
*dummy.TypedSpec() = network.LinkSpecSpec{
|
||||
@ -944,7 +947,7 @@ func TestSortBonds(t *testing.T) {
|
||||
seed := time.Now().Unix()
|
||||
rnd := rand.New(rand.NewSource(seed))
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
for i := range 100 {
|
||||
res := toResources(expectedSlice)
|
||||
rnd.Shuffle(len(res), func(i, j int) { res[i], res[j] = res[j], res[i] })
|
||||
netctrl.SortBonds(res)
|
||||
|
@ -160,8 +160,6 @@ func (ctrl *LinkStatusController) reconcile(
|
||||
|
||||
// for every rtnetlink discovered link
|
||||
for _, link := range links {
|
||||
link := link
|
||||
|
||||
var (
|
||||
ethState *ethtool.LinkState
|
||||
ethInfo *ethtool.LinkInfo
|
||||
|
@ -318,8 +318,6 @@ func updateAccumulativeAddresses(ctx context.Context, r controller.Runtime, id r
|
||||
spec := r.(*network.NodeAddress).TypedSpec()
|
||||
|
||||
for _, ip := range accumulative {
|
||||
ip := ip
|
||||
|
||||
// find insert position using binary search
|
||||
i := sort.Search(len(spec.Addresses), func(j int) bool {
|
||||
return !spec.Addresses[j].Addr().Less(ip.Addr())
|
||||
|
@ -301,7 +301,6 @@ func (ctrl *OperatorConfigController) apply(ctx context.Context, r controller.Ru
|
||||
ids := make([]string, 0, len(specs))
|
||||
|
||||
for _, spec := range specs {
|
||||
spec := spec
|
||||
id := network.LayeredID(spec.ConfigLayer, network.OperatorID(spec.Operator, spec.LinkName))
|
||||
|
||||
if err := r.Modify(
|
||||
|
@ -89,8 +89,6 @@ func (ctrl *OperatorMergeController) Run(ctx context.Context, r controller.Runti
|
||||
conflictsDetected := 0
|
||||
|
||||
for id, operator := range operators {
|
||||
operator := operator
|
||||
|
||||
if err = r.Modify(ctx, network.NewOperatorSpec(network.NamespaceName, id), func(res resource.Resource) error {
|
||||
op := res.(*network.OperatorSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
|
@ -212,7 +212,7 @@ func (suite *OperatorMergeSuite) TestMergeFlapping() {
|
||||
|
||||
flipflop := func(idx int) func() error {
|
||||
return func() error {
|
||||
for i := 0; i < 500; i++ {
|
||||
for range 500 {
|
||||
if err := suite.state.Create(suite.ctx, resources[idx]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -235,7 +235,7 @@ func (suite *OperatorMergeSuite) TestMergeFlapping() {
|
||||
eg.Go(
|
||||
func() error {
|
||||
// add/remove finalizer to the merged resource
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
if err := suite.state.AddFinalizer(
|
||||
suite.ctx,
|
||||
resource.NewMetadata(
|
||||
|
@ -281,8 +281,6 @@ func (ctrl *OperatorSpecController) reconcileOperatorOutputs(ctx context.Context
|
||||
|
||||
for _, op := range ctrl.operators {
|
||||
for _, addressSpec := range op.Operator.AddressSpecs() {
|
||||
addressSpec := addressSpec
|
||||
|
||||
if err := apply(
|
||||
network.NewAddressSpec(
|
||||
network.ConfigNamespaceName,
|
||||
@ -297,8 +295,6 @@ func (ctrl *OperatorSpecController) reconcileOperatorOutputs(ctx context.Context
|
||||
}
|
||||
|
||||
for _, routeSpec := range op.Operator.RouteSpecs() {
|
||||
routeSpec := routeSpec
|
||||
|
||||
if err := apply(
|
||||
network.NewRouteSpec(
|
||||
network.ConfigNamespaceName,
|
||||
@ -316,8 +312,6 @@ func (ctrl *OperatorSpecController) reconcileOperatorOutputs(ctx context.Context
|
||||
}
|
||||
|
||||
for _, linkSpec := range op.Operator.LinkSpecs() {
|
||||
linkSpec := linkSpec
|
||||
|
||||
if err := apply(
|
||||
network.NewLinkSpec(
|
||||
network.ConfigNamespaceName,
|
||||
@ -332,8 +326,6 @@ func (ctrl *OperatorSpecController) reconcileOperatorOutputs(ctx context.Context
|
||||
}
|
||||
|
||||
for _, hostnameSpec := range op.Operator.HostnameSpecs() {
|
||||
hostnameSpec := hostnameSpec
|
||||
|
||||
if err := apply(
|
||||
network.NewHostnameSpec(
|
||||
network.ConfigNamespaceName,
|
||||
@ -348,8 +340,6 @@ func (ctrl *OperatorSpecController) reconcileOperatorOutputs(ctx context.Context
|
||||
}
|
||||
|
||||
for _, resolverSpec := range op.Operator.ResolverSpecs() {
|
||||
resolverSpec := resolverSpec
|
||||
|
||||
if err := apply(
|
||||
network.NewResolverSpec(
|
||||
network.ConfigNamespaceName,
|
||||
@ -364,8 +354,6 @@ func (ctrl *OperatorSpecController) reconcileOperatorOutputs(ctx context.Context
|
||||
}
|
||||
|
||||
for _, timeserverSpec := range op.Operator.TimeServerSpecs() {
|
||||
timeserverSpec := timeserverSpec
|
||||
|
||||
if err := apply(
|
||||
network.NewTimeServerSpec(
|
||||
network.ConfigNamespaceName,
|
||||
|
@ -198,7 +198,7 @@ func (suite *OperatorSpecSuite) assertRunning(runningIDs []string, assertFunc fu
|
||||
}
|
||||
|
||||
for id := range runningOperators {
|
||||
found := false
|
||||
found := false //nolint:copyloopvar
|
||||
|
||||
for _, expectedID := range runningIDs {
|
||||
if expectedID == id {
|
||||
|
@ -174,7 +174,6 @@ func (ctrl *OperatorVIPConfigController) apply(ctx context.Context, r controller
|
||||
ids := make([]string, 0, len(specs))
|
||||
|
||||
for _, spec := range specs {
|
||||
spec := spec
|
||||
id := network.LayeredID(spec.ConfigLayer, network.OperatorID(spec.Operator, spec.LinkName))
|
||||
|
||||
if err := r.Modify(
|
||||
|
@ -485,7 +485,7 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
resourceNamespace := resourceEmpty.Metadata().Namespace()
|
||||
resourceType := resourceEmpty.Metadata().Type()
|
||||
|
||||
for i := 0; i < specType.length; i++ {
|
||||
for i := range specType.length {
|
||||
spec := specType.getter(i)
|
||||
|
||||
id, err := specType.idBuilder(spec)
|
||||
|
@ -138,7 +138,6 @@ func (ctrl *ResolverConfigController) apply(ctx context.Context, r controller.Ru
|
||||
ids := make([]string, 0, len(specs))
|
||||
|
||||
for _, spec := range specs {
|
||||
spec := spec
|
||||
id := network.LayeredID(spec.ConfigLayer, network.ResolverID)
|
||||
|
||||
if err := r.Modify(
|
||||
|
@ -154,7 +154,6 @@ func (ctrl *RouteConfigController) apply(ctx context.Context, r controller.Runti
|
||||
ids := make([]string, 0, len(routes))
|
||||
|
||||
for _, route := range routes {
|
||||
route := route
|
||||
id := network.LayeredID(route.ConfigLayer, network.RouteID(route.Table, route.Family, route.Destination, route.Gateway, route.Priority, route.OutLinkName))
|
||||
|
||||
if err := r.Modify(
|
||||
|
@ -87,8 +87,6 @@ func (ctrl *RouteMergeController) Run(ctx context.Context, r controller.Runtime,
|
||||
conflictsDetected := 0
|
||||
|
||||
for id, route := range routes {
|
||||
route := route
|
||||
|
||||
if err = r.Modify(ctx, network.NewRouteSpec(network.NamespaceName, id), func(res resource.Resource) error {
|
||||
rt := res.(*network.RouteSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
|
@ -255,7 +255,7 @@ func (suite *RouteMergeSuite) TestMergeFlapping() {
|
||||
|
||||
flipflop := func(idx int) func() error {
|
||||
return func() error {
|
||||
for i := 0; i < 500; i++ {
|
||||
for range 500 {
|
||||
if err := suite.state.Create(suite.ctx, resources[idx]); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -278,7 +278,7 @@ func (suite *RouteMergeSuite) TestMergeFlapping() {
|
||||
eg.Go(
|
||||
func() error {
|
||||
// add/remove finalizer to the merged resource
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
if err := suite.state.AddFinalizer(
|
||||
suite.ctx,
|
||||
resource.NewMetadata(
|
||||
|
@ -98,8 +98,6 @@ func (ctrl *RouteStatusController) Run(ctx context.Context, r controller.Runtime
|
||||
}
|
||||
|
||||
for _, route := range routes {
|
||||
route := route
|
||||
|
||||
dstAddr, _ := netip.AddrFromSlice(route.Attributes.Dst)
|
||||
dstPrefix := netip.PrefixFrom(dstAddr, int(route.DstLength))
|
||||
srcAddr, _ := netip.AddrFromSlice(route.Attributes.Src)
|
||||
|
@ -138,7 +138,6 @@ func (ctrl *TimeServerConfigController) apply(ctx context.Context, r controller.
|
||||
ids := make([]string, 0, len(specs))
|
||||
|
||||
for _, spec := range specs {
|
||||
spec := spec
|
||||
id := network.LayeredID(spec.ConfigLayer, network.TimeServerID)
|
||||
|
||||
if err := r.Modify(
|
||||
|
@ -206,7 +206,7 @@ func (suite *EventsSinkSuite) TestDrain() {
|
||||
ctx, cancel := context.WithCancel(suite.ctx)
|
||||
defer cancel()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
suite.events.Publish(
|
||||
ctx,
|
||||
&machine.PhaseEvent{
|
||||
|
@ -77,8 +77,6 @@ func (suite *KernelParamDefaultsSuite) TestContainerMode() {
|
||||
suite.startRuntime()
|
||||
|
||||
for _, prop := range getParams(runtime.ModeContainer) {
|
||||
prop := prop
|
||||
|
||||
suite.Assert().NoError(retry.Constant(10*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
|
||||
suite.assertResource(
|
||||
resource.NewMetadata(runtimeresource.NamespaceName, runtimeresource.KernelParamDefaultSpecType, prop.Key, resource.VersionUndefined),
|
||||
@ -101,8 +99,6 @@ func (suite *KernelParamDefaultsSuite) TestMetalMode() {
|
||||
suite.startRuntime()
|
||||
|
||||
for _, prop := range getParams(runtime.ModeMetal) {
|
||||
prop := prop
|
||||
|
||||
suite.Assert().NoError(retry.Constant(10*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
|
||||
suite.assertResource(
|
||||
resource.NewMetadata(runtimeresource.NamespaceName, runtimeresource.KernelParamDefaultSpecType, prop.Key, resource.VersionUndefined),
|
||||
|
@ -196,8 +196,6 @@ func (ctrl *KmsgLogDeliveryController) resend(ctx context.Context, r controller.
|
||||
sendErrors := make(chan error, len(senders))
|
||||
|
||||
for _, sender := range senders {
|
||||
sender := sender
|
||||
|
||||
go func() {
|
||||
sendErrors <- sender.Send(sendCtx, e)
|
||||
}()
|
||||
|
@ -400,7 +400,7 @@ func (ctrl *MachineStatusController) watchEvents() {
|
||||
)
|
||||
|
||||
for ev := range eventCh {
|
||||
newStage := oldStage
|
||||
newStage := oldStage //nolint:copyloopvar
|
||||
|
||||
switch event := ev.Event.Payload.(type) {
|
||||
case *machineapi.SequenceEvent:
|
||||
|
@ -257,8 +257,6 @@ func (handler *circularHandler) resend(e *runtime.LogEvent) {
|
||||
sendErrors := make(chan error, len(senders))
|
||||
|
||||
for _, sender := range senders {
|
||||
sender := sender
|
||||
|
||||
go func() {
|
||||
sendErrors <- sender.Send(sendCtx, e)
|
||||
}()
|
||||
|
@ -94,7 +94,6 @@ func TestParseLogLine(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
name, tc := name, tc
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -45,8 +45,6 @@ func TestQuote(t *testing.T) {
|
||||
expected: "http://my-host/config.yaml?uuid=\\$\\{uuid\\}\\&serial=\\$\\{serial\\}\\&mac=\\$\\{mac\\}\\&hostname=\\$\\{hostname\\}",
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@ -94,8 +92,6 @@ func TestUnquote(t *testing.T) {
|
||||
expected: "http://my-host/config.yaml?uuid=${uuid}&serial=${serial}&mac=${mac}&hostname=${hostname}",
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -70,8 +70,6 @@ func TestMapValues(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -88,8 +88,6 @@ func TestPopulate(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -59,8 +59,6 @@ func TestVariableMatches(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@ -138,8 +136,6 @@ func TestVariableReplace(t *testing.T) {
|
||||
expected: "https://example.com?a=value1",
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -51,8 +51,6 @@ func TestParseMetadata(t *testing.T) {
|
||||
expected: expectedNetworkConfigV2,
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
n := &nocloud.Nocloud{}
|
||||
|
||||
|
@ -52,8 +52,6 @@ func TestApplyNetworkConfigV2a(t *testing.T) {
|
||||
expected: expectedNetworkConfigMatchByName,
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
st := state.WrapCore(namespaced.NewState(inmem.Build))
|
||||
|
@ -303,12 +303,8 @@ func (c *Controller) runPhase(ctx context.Context, phase runtime.Phase, seq runt
|
||||
|
||||
for number, task := range phase.Tasks {
|
||||
// Make the task number human friendly.
|
||||
number := number
|
||||
|
||||
number++
|
||||
|
||||
task := task
|
||||
|
||||
eg.Go(func() error {
|
||||
progress := fmt.Sprintf("%d/%d", number, len(phase.Tasks))
|
||||
|
||||
|
@ -76,13 +76,13 @@ func TestEvents_Publish(t *testing.T) {
|
||||
|
||||
got := uint32(0)
|
||||
|
||||
for i := 0; i < tt.watchers; i++ {
|
||||
for range tt.watchers {
|
||||
if err := e.Watch(func(events <-chan runtime.EventInfo) {
|
||||
defer wg.Done()
|
||||
|
||||
l := rate.NewLimiter(500, tt.cap*8/10)
|
||||
|
||||
for j := 0; j < tt.messages; j++ {
|
||||
for j := range tt.messages {
|
||||
event, ok := <-events
|
||||
|
||||
if !ok {
|
||||
@ -110,7 +110,7 @@ func TestEvents_Publish(t *testing.T) {
|
||||
|
||||
l := rate.NewLimiter(500, tt.cap/2)
|
||||
|
||||
for i := 0; i < tt.messages; i++ {
|
||||
for i := range tt.messages {
|
||||
_ = l.Wait(context.Background()) //nolint:errcheck
|
||||
|
||||
e.Publish(context.Background(), &machine.SequenceEvent{
|
||||
@ -135,7 +135,7 @@ func receive(t *testing.T, e runtime.Watcher, n int, opts ...runtime.WatchOption
|
||||
if err := e.Watch(func(events <-chan runtime.EventInfo) {
|
||||
defer wg.Done()
|
||||
|
||||
for j := 0; j < n; j++ {
|
||||
for range n {
|
||||
event, ok := <-events
|
||||
if !ok {
|
||||
t.Fatalf("Watch: chanel closed")
|
||||
@ -186,7 +186,7 @@ func gen(k, l int) (result []int) {
|
||||
func TestEvents_WatchOptionsTailEvents(t *testing.T) {
|
||||
e := NewEvents(100, 10)
|
||||
|
||||
for i := 0; i < 200; i++ {
|
||||
for i := range 200 {
|
||||
e.Publish(context.Background(), &machine.SequenceEvent{
|
||||
Sequence: strconv.Itoa(i),
|
||||
})
|
||||
@ -203,7 +203,7 @@ func TestEvents_WatchOptionsTailEvents(t *testing.T) {
|
||||
|
||||
e = NewEvents(100, 10)
|
||||
|
||||
for i := 0; i < 30; i++ {
|
||||
for i := range 30 {
|
||||
e.Publish(context.Background(), &machine.SequenceEvent{
|
||||
Sequence: strconv.Itoa(i),
|
||||
})
|
||||
@ -219,7 +219,7 @@ func TestEvents_WatchOptionsTailEvents(t *testing.T) {
|
||||
func TestEvents_WatchOptionsTailSeconds(t *testing.T) {
|
||||
e := NewEvents(100, 10)
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for i := range 20 {
|
||||
e.Publish(context.Background(), &machine.SequenceEvent{
|
||||
Sequence: strconv.Itoa(i),
|
||||
})
|
||||
@ -242,7 +242,7 @@ func TestEvents_WatchOptionsTailSeconds(t *testing.T) {
|
||||
func TestEvents_WatchOptionsTailID(t *testing.T) {
|
||||
e := NewEvents(100, 10)
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for i := range 20 {
|
||||
e.Publish(context.Background(), &machine.SequenceEvent{
|
||||
Sequence: strconv.Itoa(i),
|
||||
})
|
||||
@ -262,7 +262,7 @@ func BenchmarkWatch(b *testing.B) {
|
||||
|
||||
wg.Add(b.N)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
_ = e.Watch(func(events <-chan runtime.EventInfo) { wg.Done() }) //nolint:errcheck
|
||||
}
|
||||
|
||||
@ -280,11 +280,11 @@ func BenchmarkPublish(bb *testing.B) {
|
||||
|
||||
wg.Add(watchers)
|
||||
|
||||
for j := 0; j < watchers; j++ {
|
||||
for range watchers {
|
||||
_ = e.Watch(func(events <-chan runtime.EventInfo) { //nolint:errcheck
|
||||
defer wg.Done()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
if _, ok := <-events; !ok {
|
||||
return
|
||||
}
|
||||
@ -296,7 +296,7 @@ func BenchmarkPublish(bb *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
e.Publish(context.Background(), &ev)
|
||||
}
|
||||
|
||||
|
@ -102,8 +102,6 @@ func TestPriorityLockConcurrent(t *testing.T) {
|
||||
sequenceCh := make(chan testSequenceNumber)
|
||||
|
||||
for seq := testSequenceNumber(1); seq <= 20; seq++ {
|
||||
seq := seq
|
||||
|
||||
eg.Go(func() error {
|
||||
ctx, err := lock.Lock(globalCtx, time.Second, seq)
|
||||
if errors.Is(err, runtime.ErrLocked) {
|
||||
|
@ -849,8 +849,6 @@ func partitionAndFormatDisks(logger *log.Logger, r runtime.Runtime) error {
|
||||
}
|
||||
|
||||
for _, disk := range r.Config().Machine().Disks() {
|
||||
disk := disk
|
||||
|
||||
if err := func() error {
|
||||
bd, err := blockdevice.Open(disk.Device(), blockdevice.WithMode(blockdevice.ReadonlyMode))
|
||||
if err != nil {
|
||||
|
@ -449,8 +449,6 @@ func (ctrl *Controller) updateLoggingConfig(ctx context.Context, dests []talosco
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, sender := range prevSenders {
|
||||
sender := sender
|
||||
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
|
@ -32,7 +32,7 @@ func (suite *EventsSuite) TestEmpty() {
|
||||
func (suite *EventsSuite) TestSome() {
|
||||
var e events.ServiceEvents
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
e.Push(events.ServiceEvent{
|
||||
Message: strconv.Itoa(i),
|
||||
})
|
||||
@ -56,7 +56,7 @@ func (suite *EventsSuite) TestOverflow() {
|
||||
|
||||
numEvents := events.MaxEventsToKeep*2 + 3
|
||||
|
||||
for i := 0; i < numEvents; i++ {
|
||||
for i := range numEvents {
|
||||
e.Push(events.ServiceEvent{
|
||||
Message: strconv.Itoa(i),
|
||||
})
|
||||
|
@ -45,7 +45,7 @@ func (suite *CheckSuite) TestHealthy() {
|
||||
errCh <- health.Run(ctx, &settings, &state, check)
|
||||
}()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
if atomic.LoadUint32(&called) > 2 {
|
||||
@ -94,7 +94,7 @@ func (suite *CheckSuite) TestHealthChange() {
|
||||
}()
|
||||
|
||||
// wait for the first health change
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
if state.Get().Healthy != nil {
|
||||
break
|
||||
}
|
||||
@ -107,7 +107,7 @@ func (suite *CheckSuite) TestHealthChange() {
|
||||
|
||||
atomic.StoreUint32(&healthy, 1)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
if *state.Get().Healthy {
|
||||
@ -162,7 +162,7 @@ func (suite *CheckSuite) TestCheckAbort() {
|
||||
}()
|
||||
|
||||
// wait for the first health change
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
if state.Get().Healthy != nil {
|
||||
break
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ func TestRestartService(t *testing.T) {
|
||||
|
||||
services.Load(TestService{})
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
require.NoError(t, services.Start("test-service"))
|
||||
|
||||
require.NoError(t, system.WaitForServiceWithInstance(services, system.StateEventUp, "test-service").Wait(ctx))
|
||||
|
@ -197,7 +197,7 @@ func (suite *ContainerdSuite) TestRunTwice() {
|
||||
|
||||
// running same container twice should be fine
|
||||
// (checks that containerd state is cleaned up properly)
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := range 2 {
|
||||
suite.Assert().NoError(r.Run(MockEventSink))
|
||||
// calling stop when Run has finished is no-op
|
||||
suite.Assert().NoError(r.Stop())
|
||||
@ -310,7 +310,7 @@ func (suite *ContainerdSuite) TestStopFailingAndRestarting() {
|
||||
done <- r.Run(MockEventSink)
|
||||
}()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
if bytes.Contains(suite.getLogContents(suite.containerID+".log"), []byte("fail\n")) {
|
||||
@ -330,7 +330,7 @@ func (suite *ContainerdSuite) TestStopFailingAndRestarting() {
|
||||
suite.Assert().NoError(err)
|
||||
suite.Assert().NoError(f.Close())
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
if bytes.Contains(suite.getLogContents(suite.containerID+".log"), []byte("ok\n")) {
|
||||
|
@ -129,7 +129,7 @@ func (suite *ProcessSuite) TestRunRestartFailed() {
|
||||
return logContents
|
||||
}
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
if len(fetchLog()) > 20 {
|
||||
|
@ -598,7 +598,7 @@ func promoteMember(ctx context.Context, r runtime.Runtime, memberID uint64) erro
|
||||
}
|
||||
|
||||
// try to iterate all available endpoints in the time available for an attempt
|
||||
for i := 0; i < len(endpoints); i++ {
|
||||
for range len(endpoints) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return retry.ExpectedError(ctx.Err())
|
||||
|
@ -91,8 +91,6 @@ func TestParseArgs(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -74,8 +74,6 @@ func TestCertificate(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
serverCSR, serverCert, err := x509.NewEd25519CSRAndIdentity(tt.csrSetters...)
|
||||
require.NoError(t, err)
|
||||
|
@ -59,8 +59,6 @@ func (suite *ApidSuite) TestControlPlaneRouting() {
|
||||
nodes := suite.DiscoverNodeInternalIPs(suite.ctx)
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
endpoint := endpoint
|
||||
|
||||
suite.Run(endpoint, func() {
|
||||
cli, err := client.New(suite.ctx,
|
||||
client.WithConfig(suite.Talosconfig),
|
||||
@ -112,8 +110,6 @@ func (suite *ApidSuite) TestWorkerNoRouting() {
|
||||
}
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
endpoint := endpoint
|
||||
|
||||
suite.Run(endpoint, func() {
|
||||
cli, err := client.New(suite.ctx,
|
||||
client.WithConfig(suite.Talosconfig),
|
||||
|
@ -483,7 +483,7 @@ func (suite *ApplyConfigSuite) TestApplyTry() {
|
||||
|
||||
suite.Assert().Truef(lookupDummyInterface(), "dummy interface wasn't found")
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
provider, err = getMachineConfig(nodeCtx)
|
||||
suite.Assert().Nilf(err, "failed to read existing config from node %q: %s", node, err)
|
||||
|
||||
|
@ -198,7 +198,7 @@ func (suite *DiscoverySuite) TestRegistries() {
|
||||
|
||||
var rawAffiliates []*cluster.Affiliate
|
||||
|
||||
for i := 0; i < 30; i++ {
|
||||
for range 30 {
|
||||
rawAffiliates = suite.getAffiliates(nodeCtx, cluster.RawNamespaceName)
|
||||
|
||||
if len(rawAffiliates) == expectedRawAffiliates {
|
||||
|
@ -99,8 +99,6 @@ func (suite *EtcdRecoverSuite) TestSnapshotRecover() {
|
||||
errCh := make(chan error)
|
||||
|
||||
for _, node := range controlPlaneNodes {
|
||||
node := node
|
||||
|
||||
go func() {
|
||||
errCh <- func() error {
|
||||
nodeCtx := client.WithNodes(suite.ctx, node)
|
||||
|
@ -74,8 +74,6 @@ func (suite *FirewallSuite) TestKubeletAccess() {
|
||||
}
|
||||
|
||||
for _, node := range allNodes {
|
||||
node := node
|
||||
|
||||
eg.Go(func() error {
|
||||
attemptCtx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
|
@ -86,7 +86,7 @@ func (suite *LogsSuite) TestServicesHaveLogs() {
|
||||
func (suite *LogsSuite) TestTail() {
|
||||
// invoke machined enough times to generate
|
||||
// some logs
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
_, err := suite.Client.Version(suite.nodeCtx)
|
||||
suite.Require().NoError(err)
|
||||
}
|
||||
@ -160,7 +160,7 @@ func (suite *LogsSuite) testStreaming(tailLines int32) {
|
||||
if tailLines >= 0 {
|
||||
// invoke machined enough times to generate
|
||||
// some logs
|
||||
for i := int32(0); i < tailLines; i++ {
|
||||
for range tailLines {
|
||||
_, err := suite.Client.Stats(
|
||||
suite.nodeCtx,
|
||||
constants.SystemContainerdNamespace,
|
||||
|
@ -256,7 +256,7 @@ func (suite *ResetSuite) TestResetDuringBoot() {
|
||||
|
||||
suite.T().Log("Resetting node", node)
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
bootID := suite.ReadBootIDWithRetry(nodeCtx, time.Minute*5)
|
||||
|
||||
err := retry.Constant(5*time.Minute, retry.WithUnits(time.Millisecond*1000)).Retry(
|
||||
|
@ -74,8 +74,6 @@ func (suite *ResourcesSuite) TestListResources() {
|
||||
eg, egCtx := errgroup.WithContext(ctx)
|
||||
|
||||
for _, resourceType := range resourceTypes {
|
||||
resourceType := resourceType
|
||||
|
||||
eg.Go(func() error {
|
||||
for _, namespace := range namespaces {
|
||||
_, err := suite.Client.COSI.List(egCtx, resource.NewMetadata(namespace, resourceType, "", resource.VersionUndefined))
|
||||
|
@ -392,7 +392,7 @@ func (apiSuite *APISuite) ClearConnectionRefused(ctx context.Context, nodes ...s
|
||||
}
|
||||
|
||||
apiSuite.Require().NoError(retry.Constant(backoff.DefaultConfig.MaxDelay, retry.WithUnits(time.Second)).Retry(func() error {
|
||||
for i := 0; i < numMasterNodes; i++ {
|
||||
for range numMasterNodes {
|
||||
_, err := apiSuite.Client.Version(client.WithNodes(ctx, nodes...))
|
||||
if err == nil {
|
||||
continue
|
||||
|
@ -171,7 +171,6 @@ func (suite *TalosconfigSuite) TestNew() {
|
||||
opts: []base.RunOption{base.StdoutShouldMatch(regexp.MustCompile(`hosts`))},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
name := strings.Join(tt.args, "_")
|
||||
suite.Run(name, func() {
|
||||
suite.T().Parallel()
|
||||
@ -229,7 +228,6 @@ func (suite *TalosconfigSuite) TestNew() {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
name := strings.Join(tt.args, "_")
|
||||
suite.Run(name, func() {
|
||||
suite.T().Parallel()
|
||||
@ -266,7 +264,6 @@ func (suite *TalosconfigSuite) TestNew() {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
name := strings.Join(tt.args, "_")
|
||||
suite.Run(name, func() {
|
||||
suite.T().Parallel()
|
||||
@ -313,7 +310,6 @@ func (suite *TalosconfigSuite) TestNew() {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
name := strings.Join(tt.args, "_")
|
||||
suite.Run(name, func() {
|
||||
suite.T().Parallel()
|
||||
|
@ -183,8 +183,6 @@ func (suite *GenSuite) testGenConfigPatch(patch []byte) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
suite.Run(tt.flag, func() {
|
||||
suite.RunCLI([]string{"gen", "config", "--force", "foo", "https://192.168.0.1:6443", "--" + tt.flag, string(patch)},
|
||||
base.StdoutEmpty(),
|
||||
|
@ -95,7 +95,6 @@ func (suite *ListSuite) TestDepth() {
|
||||
|
||||
{separators: 5, flags: []string{"--recurse=true"}},
|
||||
} {
|
||||
test := test
|
||||
suite.Run(strings.Join(test.flags, ","), func() {
|
||||
suite.T().Parallel()
|
||||
runAndCheck(suite.T(), test.separators, test.flags...)
|
||||
|
@ -34,7 +34,7 @@ func (suite *LogsSuite) TestTailLogs() {
|
||||
node := suite.RandomDiscoveredNodeInternalIP()
|
||||
|
||||
// run some machined API calls to produce enough log lines
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
suite.RunCLI([]string{"-n", node, "version"})
|
||||
}
|
||||
|
||||
|
@ -52,11 +52,7 @@ func (suite *ValidateSuite) TestValidate() {
|
||||
)
|
||||
|
||||
for _, configFile := range []string{"controlplane.yaml", "worker.yaml"} {
|
||||
configFile := configFile
|
||||
|
||||
for _, mode := range []string{"cloud", "container"} {
|
||||
mode := mode
|
||||
|
||||
suite.Run(fmt.Sprintf("%s-%s", configFile, mode), func() {
|
||||
suite.RunCLI([]string{"validate", "-m", mode, "-c", configFile, "--strict"})
|
||||
})
|
||||
|
@ -179,7 +179,7 @@ func (suite *BaseSuite) waitForClusterHealth() {
|
||||
runs = 3
|
||||
}
|
||||
|
||||
for run := 0; run < runs; run++ {
|
||||
for run := range runs {
|
||||
if run > 0 {
|
||||
time.Sleep(15 * time.Second)
|
||||
}
|
||||
@ -570,7 +570,7 @@ func (suite *BaseSuite) setupCluster(options clusterOptions) {
|
||||
)
|
||||
suite.Require().NoError(err)
|
||||
|
||||
for i := 0; i < options.ControlplaneNodes; i++ {
|
||||
for i := range options.ControlplaneNodes {
|
||||
request.Nodes = append(
|
||||
request.Nodes,
|
||||
provision.NodeRequest{
|
||||
|
@ -157,12 +157,8 @@ func (suite *ResolverSuite) TestRegistryEndpoints() {
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
|
||||
suite.Run(tt.name, func() {
|
||||
for _, req := range tt.requests {
|
||||
req := req
|
||||
|
||||
suite.Run(req.host, func() {
|
||||
endpoints, overridePath, err := image.RegistryEndpoints(tt.config, req.host)
|
||||
|
||||
|
@ -108,8 +108,6 @@ func (c *Client) StopAndRemovePodSandboxes(ctx context.Context, stopAction StopA
|
||||
var g errgroup.Group
|
||||
|
||||
for _, pod := range pods {
|
||||
pod := pod // https://golang.org/doc/faq#closures_and_goroutines
|
||||
|
||||
g.Go(func() error {
|
||||
status, _, e := c.PodSandboxStatus(ctx, pod.GetId())
|
||||
if e != nil {
|
||||
@ -177,8 +175,6 @@ func stopAndRemove(ctx context.Context, stopAction StopAction, client *Client, p
|
||||
var g errgroup.Group
|
||||
|
||||
for _, container := range containers {
|
||||
container := container // https://golang.org/doc/faq#closures_and_goroutines
|
||||
|
||||
g.Go(func() error {
|
||||
// TODO(andrewrynhard): Can we set the timeout dynamically?
|
||||
if container.State == runtimeapi.ContainerState_CONTAINER_RUNNING || container.State == runtimeapi.ContainerState_CONTAINER_UNKNOWN {
|
||||
|
@ -39,8 +39,8 @@ func (w *TermUIWrapper) Draw(screen tcell.Screen) {
|
||||
buf := termui.NewBuffer(w.termUIDrawable.GetRect())
|
||||
w.termUIDrawable.Draw(buf)
|
||||
|
||||
for i := 0; i < width; i++ {
|
||||
for j := 0; j < height; j++ {
|
||||
for i := range width {
|
||||
for j := range height {
|
||||
cell := buf.GetCell(image.Point{X: i, Y: j})
|
||||
|
||||
style := w.convertStyle(cell.Style)
|
||||
|
@ -469,9 +469,8 @@ func (d *Dashboard) processLog(node, line string) {
|
||||
|
||||
func (d *Dashboard) selectScreen(screen Screen) {
|
||||
for _, info := range d.screenConfigs {
|
||||
info := info
|
||||
if info.screen == screen {
|
||||
d.selectedScreenConfig = &info
|
||||
d.selectedScreenConfig = &info //nolint:exportloopref
|
||||
|
||||
d.mainGrid.AddItem(info.primitive, 1, 0, 1, 1, 0, 0, false)
|
||||
|
||||
|
@ -72,8 +72,6 @@ func (source *Source) run(ctx context.Context) {
|
||||
|
||||
nodes := source.nodes(ctx)
|
||||
for _, node := range nodes {
|
||||
node := node
|
||||
|
||||
source.eg.Go(func() error {
|
||||
source.runResourceWatchWithRetries(ctx, node)
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff.