diff --git a/client/client_test.go b/client/client_test.go
index 91b301bdf280..b434b347c037 100644
--- a/client/client_test.go
+++ b/client/client_test.go
@@ -213,7 +213,7 @@ var allTests = []func(t *testing.T, sb integration.Sandbox){
 }
 
 func TestIntegration(t *testing.T) {
-	testIntegration(t, allTests...)
+	testIntegration(t, append(allTests, validationTests...)...)
 }
 
 func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sandbox)) {
@@ -8913,6 +8913,7 @@ cat <<EOF > $BUILDKIT_SCAN_DESTINATION/spdx.json
 EOF
 `
 		img.Config.Cmd = []string{"/bin/sh", "-c", cmd}
+		img.Platform = p
 		config, err := json.Marshal(img)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to marshal image config")
@@ -9191,6 +9192,7 @@ cat <<EOF > $BUILDKIT_SCAN_DESTINATION/spdx.json
 EOF
 `
 		img.Config.Cmd = []string{"/bin/sh", "-c", cmd}
+		img.Platform = p
 		config, err := json.Marshal(img)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to marshal image config")
@@ -9243,6 +9245,7 @@ EOF
 
 		var img ocispecs.Image
 		img.Config.Cmd = []string{"/bin/sh", "-c", "cat /greeting"}
+		img.Platform = p
 		config, err := json.Marshal(img)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to marshal image config")
diff --git a/client/validation_test.go b/client/validation_test.go
new file mode 100644
index 000000000000..672054a6a301
--- /dev/null
+++ b/client/validation_test.go
@@ -0,0 +1,319 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"testing"
+
+	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
+	"github.com/moby/buildkit/frontend/gateway/client"
+	sppb "github.com/moby/buildkit/sourcepolicy/pb"
+	"github.com/moby/buildkit/util/testutil/integration"
+	"github.com/moby/buildkit/util/testutil/workers"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/stretchr/testify/require"
+)
+
+var validationTests = []func(t *testing.T, sb integration.Sandbox){
+	testValidateNullConfig,
+	testValidateInvalidConfig,
+	testValidatePlatformsEmpty,
+	testValidatePlatformsInvalid,
+	testValidateSourcePolicy,
+}
+
+func testValidateNullConfig(t *testing.T, sb integration.Sandbox) {
+	requiresLinux(t)
+	workers.CheckFeatureCompat(t, sb, workers.FeatureOCIExporter)
+
+	ctx := sb.Context()
+
+	c, err := New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	b := func(ctx context.Context, c client.Client) (*client.Result, error) {
+		def, err := llb.Scratch().Marshal(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		res, err := c.Solve(ctx, client.SolveRequest{
+			Evaluate:   true,
+			Definition: def.ToPB(),
+		})
+		if err != nil {
+			return nil, err
+		}
+		res.AddMeta("containerimage.config", []byte("null"))
+		return res, nil
+	}
+
+	_, err = c.Build(ctx, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:   ExporterOCI,
+				Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+			},
+		},
+	}, "", b, nil)
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "invalid null image config for export")
+}
+
+func testValidateInvalidConfig(t *testing.T, sb integration.Sandbox) {
+	requiresLinux(t)
+	workers.CheckFeatureCompat(t, sb, workers.FeatureOCIExporter)
+
+	ctx := sb.Context()
+
+	c, err := New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	b := func(ctx context.Context, c client.Client) (*client.Result, error) {
+		def, err := llb.Scratch().Marshal(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		res, err := c.Solve(ctx, client.SolveRequest{
+			Evaluate:   true,
+			Definition: def.ToPB(),
+		})
+		if err != nil {
+			return nil, err
+		}
+		var img ocispecs.Image
+		img.Platform = ocispecs.Platform{
+			Architecture: "amd64",
+		}
+		dt, err := json.Marshal(img)
+		if err != nil {
+			return nil, err
+		}
+		res.AddMeta("containerimage.config", dt)
+		return res, nil
+	}
+
+	_, err = c.Build(ctx, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:   ExporterOCI,
+				Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+			},
+		},
+	}, "", b, nil)
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "invalid image config: os and architecture must be specified together")
+}
+
+func testValidatePlatformsEmpty(t *testing.T, sb integration.Sandbox) {
+	requiresLinux(t)
+	workers.CheckFeatureCompat(t, sb, workers.FeatureOCIExporter)
+
+	ctx := sb.Context()
+
+	c, err := New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	b := func(ctx context.Context, c client.Client) (*client.Result, error) {
+		def, err := llb.Scratch().Marshal(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		res, err := c.Solve(ctx, client.SolveRequest{
+			Evaluate:   true,
+			Definition: def.ToPB(),
+		})
+		if err != nil {
+			return nil, err
+		}
+		res.AddMeta("refs.platforms", []byte("null"))
+		return res, nil
+	}
+
+	_, err = c.Build(ctx, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:   ExporterOCI,
+				Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+			},
+		},
+	}, "", b, nil)
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "invalid empty platforms index for exporter")
+}
+
+func testValidatePlatformsInvalid(t *testing.T, sb integration.Sandbox) {
+	requiresLinux(t)
+	workers.CheckFeatureCompat(t, sb, workers.FeatureOCIExporter)
+
+	ctx := sb.Context()
+
+	c, err := New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	tcases := []struct {
+		name  string
+		value []exptypes.Platform
+		exp   string
+	}{
+		{
+			name:  "emptyID",
+			value: []exptypes.Platform{{}},
+			exp:   "invalid empty platform key for exporter",
+		},
+		{
+			name: "missingOS",
+			value: []exptypes.Platform{
+				{
+					ID: "foo",
+				},
+			},
+			exp: "invalid platform value",
+		},
+	}
+
+	for _, tc := range tcases {
+		t.Run(tc.name, func(t *testing.T) {
+			b := func(ctx context.Context, c client.Client) (*client.Result, error) {
+				def, err := llb.Scratch().Marshal(ctx)
+				if err != nil {
+					return nil, err
+				}
+
+				res, err := c.Solve(ctx, client.SolveRequest{
+					Evaluate:   true,
+					Definition: def.ToPB(),
+				})
+				if err != nil {
+					return nil, err
+				}
+
+				dt, err := json.Marshal(exptypes.Platforms{Platforms: tc.value})
+				if err != nil {
+					return nil, err
+				}
+
+				res.AddMeta("refs.platforms", dt)
+				return res, nil
+			}
+
+			_, err = c.Build(ctx, SolveOpt{
+				Exports: []ExportEntry{
+					{
+						Type:   ExporterOCI,
+						Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+					},
+				},
+			}, "", b, nil)
+			require.Error(t, err)
+			require.Contains(t, err.Error(), tc.exp)
+		})
+	}
+}
+
+func testValidateSourcePolicy(t *testing.T, sb integration.Sandbox) {
+	requiresLinux(t)
+
+	ctx := sb.Context()
+
+	c, err := New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	tcases := []struct {
+		name  string
+		value *sppb.Policy
+		exp   string
+	}{
+		// this condition fails on marshaling atm
+		// {
+		// 	name: "nilrule",
+		// 	value: &sppb.Policy{
+		// 		Rules: []*sppb.Rule{nil},
+		// 	},
+		// 	exp: "",
+		// },
+		{
+			name: "nilselector",
+			value: &sppb.Policy{
+				Rules: []*sppb.Rule{
+					{
+						Action: sppb.PolicyAction_CONVERT,
+					},
+				},
+			},
+			exp: "invalid nil selector in policy",
+		},
+		{
+			name: "emptyaction",
+			value: &sppb.Policy{
+				Rules: []*sppb.Rule{
+					{
+						Action: sppb.PolicyAction(9000),
+						Selector: &sppb.Selector{
+							Identifier: "docker-image://docker.io/library/alpine:latest",
+						},
+					},
+				},
+			},
+			exp: "unknown type",
+		},
+		{
+			name: "nilupdates",
+			value: &sppb.Policy{
+				Rules: []*sppb.Rule{
+					{
+						Action: sppb.PolicyAction_CONVERT,
+						Selector: &sppb.Selector{
+							Identifier: "docker-image://docker.io/library/alpine:latest",
+						},
+					},
+				},
+			},
+			exp: "missing destination for convert rule",
+		},
+	}
+
+	for _, tc := range tcases {
+		t.Run(tc.name, func(t *testing.T) {
+			var viaFrontend bool
+
+			b := func(ctx context.Context, c client.Client) (*client.Result, error) {
+				def, err := llb.Image("alpine").Marshal(ctx)
+				if err != nil {
+					return nil, err
+				}
+
+				req := client.SolveRequest{
+					Evaluate:   true,
+					Definition: def.ToPB(),
+				}
+				if viaFrontend {
+					req.SourcePolicies = []*sppb.Policy{
+						tc.value,
+					}
+				}
+				return c.Solve(ctx, req)
+			}
+
+			_, err = c.Build(ctx, SolveOpt{
+				SourcePolicy: tc.value,
+			}, "", b, nil)
+			require.Error(t, err)
+			require.Contains(t, err.Error(), tc.exp)
+
+			viaFrontend = true
+			_, err = c.Build(ctx, SolveOpt{}, "", b, nil)
+			require.Error(t, err)
+			require.Contains(t, err.Error(), tc.exp)
+		})
+	}
+}
diff --git a/control/control.go b/control/control.go
index 276003604db5..40058f8fe1f1 100644
--- a/control/control.go
+++ b/control/control.go
@@ -420,6 +420,9 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 
 	var cacheImports []frontend.CacheOptionsEntry
 	for _, im := range req.Cache.Imports {
+		if im == nil {
+			continue
+		}
 		cacheImports = append(cacheImports, frontend.CacheOptionsEntry{
 			Type:  im.Type,
 			Attrs: im.Attrs,
diff --git a/exporter/containerimage/exptypes/parse.go b/exporter/containerimage/exptypes/parse.go
index 293a24ed0772..e8d9b7f0cb73 100644
--- a/exporter/containerimage/exptypes/parse.go
+++ b/exporter/containerimage/exptypes/parse.go
@@ -17,6 +17,18 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
 				return Platforms{}, errors.Wrapf(err, "failed to parse platforms passed to provenance processor")
 			}
 		}
+		if len(ps.Platforms) == 0 {
+			return Platforms{}, errors.Errorf("invalid empty platforms index for exporter")
+		}
+		for i, p := range ps.Platforms {
+			if p.ID == "" {
+				return Platforms{}, errors.Errorf("invalid empty platform key for exporter")
+			}
+			if p.Platform.OS == "" || p.Platform.Architecture == "" {
+				return Platforms{}, errors.Errorf("invalid platform value %v for exporter", p.Platform)
+			}
+			ps.Platforms[i].Platform = platforms.Normalize(p.Platform)
+		}
 		return ps, nil
 	}
 
@@ -36,6 +48,8 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
 				OSFeatures:   img.OSFeatures,
 				Variant:      img.Variant,
 			}
+		} else if img.OS != "" || img.Architecture != "" {
+			return Platforms{}, errors.Errorf("invalid image config: os and architecture must be specified together")
 		}
 	}
 	p = platforms.Normalize(p)
diff --git a/exporter/containerimage/writer.go b/exporter/containerimage/writer.go
index 089c11798854..303b9ff06580 100644
--- a/exporter/containerimage/writer.go
+++ b/exporter/containerimage/writer.go
@@ -659,11 +659,27 @@ func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) {
 }
 
 func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache *exptypes.InlineCacheEntry, epoch *time.Time) ([]byte, error) {
+	var img ocispecs.Image
+	if err := json.Unmarshal(dt, &img); err != nil {
+		return nil, errors.Wrap(err, "invalid image config for export")
+	}
+
 	m := map[string]json.RawMessage{}
 	if err := json.Unmarshal(dt, &m); err != nil {
errors.Wrap(err, "failed to parse image config for patch") } + if m == nil { + return nil, errors.Errorf("invalid null image config for export") + } + + if img.OS == "" { + return nil, errors.Errorf("invalid image config for export: missing os") + } + if img.Architecture == "" { + return nil, errors.Errorf("invalid image config for export: missing architecture") + } + var rootFS ocispecs.RootFS rootFS.Type = "layers" for _, desc := range descs { diff --git a/frontend/gateway/client/attestation.go b/frontend/gateway/client/attestation.go index 5ffe67233c50..c5112db9db64 100644 --- a/frontend/gateway/client/attestation.go +++ b/frontend/gateway/client/attestation.go @@ -30,8 +30,14 @@ func AttestationToPB[T any](a *result.Attestation[T]) (*pb.Attestation, error) { } func AttestationFromPB[T any](a *pb.Attestation) (*result.Attestation[T], error) { + if a == nil { + return nil, errors.Errorf("invalid nil attestation") + } subjects := make([]result.InTotoSubject, len(a.InTotoSubjects)) for i, subject := range a.InTotoSubjects { + if subject == nil { + return nil, errors.Errorf("invalid nil attestation subject") + } subjects[i] = result.InTotoSubject{ Kind: subject.Kind, Name: subject.Name, diff --git a/frontend/gateway/gateway.go b/frontend/gateway/gateway.go index 4ca0ae6b0f72..bc0189bdf900 100644 --- a/frontend/gateway/gateway.go +++ b/frontend/gateway/gateway.go @@ -643,12 +643,21 @@ func (lbf *llbBridgeForwarder) registerResultIDs(results ...solver.Result) (ids func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) { var cacheImports []frontend.CacheOptionsEntry for _, e := range req.CacheImports { + if e == nil { + return nil, errors.Errorf("invalid nil cache import") + } cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ Type: e.Type, Attrs: e.Attrs, }) } + for _, p := range req.SourcePolicies { + if p == nil { + return nil, errors.Errorf("invalid nil source policy") + } + } + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) res, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{ Evaluate: req.Evaluate, @@ -1073,6 +1082,12 @@ func (lbf *llbBridgeForwarder) ReleaseContainer(ctx context.Context, in *pb.Rele } func (lbf *llbBridgeForwarder) Warn(ctx context.Context, in *pb.WarnRequest) (*pb.WarnResponse, error) { + // validate ranges are valid + for _, r := range in.Ranges { + if r == nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid source range") + } + } err := lbf.llbBridge.Warn(ctx, in.Digest, string(in.Short), frontend.WarnOpts{ Level: int(in.Level), SourceInfo: in.Info, diff --git a/solver/llbsolver/bridge.go b/solver/llbsolver/bridge.go index a94dc4114528..5fd66c9fb68e 100644 --- a/solver/llbsolver/bridge.go +++ b/solver/llbsolver/bridge.go @@ -86,6 +86,14 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp } var polEngine SourcePolicyEvaluator if srcPol != nil || len(pol) > 0 { + for _, p := range pol { + if p == nil { + return nil, errors.Errorf("invalid nil policy") + } + if err := validateSourcePolicy(*p); err != nil { + return nil, err + } + } if srcPol != nil { pol = append([]*spb.Policy{srcPol}, pol...) 
 		}
diff --git a/solver/llbsolver/solver.go b/solver/llbsolver/solver.go
index 452d43c2a493..7e33be3b4886 100644
--- a/solver/llbsolver/solver.go
+++ b/solver/llbsolver/solver.go
@@ -474,6 +474,9 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 	j.SetValue(keyEntitlements, set)
 
 	if srcPol != nil {
+		if err := validateSourcePolicy(*srcPol); err != nil {
+			return nil, err
+		}
 		j.SetValue(keySourcePolicy, *srcPol)
 	}
 
@@ -610,6 +613,23 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 	}, nil
 }
 
+func validateSourcePolicy(pol spb.Policy) error {
+	for _, r := range pol.Rules {
+		if r == nil {
+			return errors.New("invalid nil rule in policy")
+		}
+		if r.Selector == nil {
+			return errors.New("invalid nil selector in policy")
+		}
+		for _, c := range r.Selector.Constraints {
+			if c == nil {
+				return errors.New("invalid nil constraint in policy")
+			}
+		}
+	}
+	return nil
+}
+
 func runCacheExporters(ctx context.Context, exporters []RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult], inp *result.Result[cache.ImmutableRef]) (map[string]string, error) {
 	eg, ctx := errgroup.WithContext(ctx)
 	g := session.NewGroup(j.SessionID)
@@ -1035,6 +1055,9 @@ func loadSourcePolicy(b solver.Builder) (*spb.Policy, error) {
 			return errors.Errorf("invalid source policy %T", v)
 		}
 		for _, f := range x.Rules {
+			if f == nil {
+				return errors.Errorf("invalid nil policy rule")
+			}
 			r := *f
 			srcPol.Rules = append(srcPol.Rules, &r)
 		}
diff --git a/sourcepolicy/matcher.go b/sourcepolicy/matcher.go
index 79ab4032a5ae..2abe1039071f 100644
--- a/sourcepolicy/matcher.go
+++ b/sourcepolicy/matcher.go
@@ -10,6 +10,9 @@ import (
 
 func match(ctx context.Context, src *selectorCache, ref string, attrs map[string]string) (bool, error) {
 	for _, c := range src.Constraints {
+		if c == nil {
+			return false, errors.Errorf("invalid nil constraint for %v", src)
+		}
 		switch c.Condition {
 		case spb.AttrMatch_EQUAL:
 			if attrs[c.Key] != c.Value {
diff --git a/util/tracing/transform/attribute.go b/util/tracing/transform/attribute.go
index 2debe8835924..bc0df048d0a2 100644
--- a/util/tracing/transform/attribute.go
+++ b/util/tracing/transform/attribute.go
@@ -13,6 +13,9 @@ func Attributes(attrs []*commonpb.KeyValue) []attribute.KeyValue {
 	out := make([]attribute.KeyValue, 0, len(attrs))
 
 	for _, a := range attrs {
+		if a == nil {
+			continue
+		}
 		kv := attribute.KeyValue{
 			Key:   attribute.Key(a.Key),
 			Value: toValue(a.Value),
@@ -42,7 +45,9 @@ func toValue(v *commonpb.AnyValue) attribute.Value {
 func boolArray(kv []*commonpb.AnyValue) attribute.Value {
 	arr := make([]bool, len(kv))
 	for i, v := range kv {
-		arr[i] = v.GetBoolValue()
+		if v != nil {
+			arr[i] = v.GetBoolValue()
+		}
 	}
 	return attribute.BoolSliceValue(arr)
 }
@@ -50,7 +55,9 @@ func boolArray(kv []*commonpb.AnyValue) attribute.Value {
 func intArray(kv []*commonpb.AnyValue) attribute.Value {
 	arr := make([]int64, len(kv))
 	for i, v := range kv {
-		arr[i] = v.GetIntValue()
+		if v != nil {
+			arr[i] = v.GetIntValue()
+		}
 	}
 	return attribute.Int64SliceValue(arr)
 }
@@ -58,7 +65,9 @@ func intArray(kv []*commonpb.AnyValue) attribute.Value {
 func doubleArray(kv []*commonpb.AnyValue) attribute.Value {
 	arr := make([]float64, len(kv))
 	for i, v := range kv {
-		arr[i] = v.GetDoubleValue()
+		if v != nil {
+			arr[i] = v.GetDoubleValue()
+		}
 	}
 	return attribute.Float64SliceValue(arr)
 }
@@ -66,13 +75,15 @@ func doubleArray(kv []*commonpb.AnyValue) attribute.Value {
 func stringArray(kv []*commonpb.AnyValue) attribute.Value {
 	arr := make([]string, len(kv))
 	for i, v := range kv {
-		arr[i] = v.GetStringValue()
+		if v != nil {
+			arr[i] = v.GetStringValue()
+		}
 	}
 	return attribute.StringSliceValue(arr)
 }
 
 func arrayValues(kv []*commonpb.AnyValue) attribute.Value {
-	if len(kv) == 0 {
+	if len(kv) == 0 || kv[0] == nil {
 		return attribute.StringSliceValue([]string{})
 	}
diff --git a/util/tracing/transform/span.go b/util/tracing/transform/span.go
index 9f7924c4a7e1..2273e3635d9d 100644
--- a/util/tracing/transform/span.go
+++ b/util/tracing/transform/span.go
@@ -32,14 +32,20 @@ func Spans(sdl []*tracepb.ResourceSpans) []tracesdk.ReadOnlySpan {
 		}
 
 		for _, sdi := range sd.ScopeSpans {
-			sda := make([]tracesdk.ReadOnlySpan, len(sdi.Spans))
-			for i, s := range sdi.Spans {
-				sda[i] = &readOnlySpan{
+			if sdi == nil {
+				continue
+			}
+			sda := make([]tracesdk.ReadOnlySpan, 0, len(sdi.Spans))
+			for _, s := range sdi.Spans {
+				if s == nil {
+					continue
+				}
+				sda = append(sda, &readOnlySpan{
 					pb:        s,
 					is:        sdi.Scope,
 					resource:  sd.Resource,
 					schemaURL: sd.SchemaUrl,
-				}
+				})
 			}
 			out = append(out, sda...)
 		}
@@ -170,6 +176,9 @@ var _ tracesdk.ReadOnlySpan = &readOnlySpan{}
 
 // status transform a OTLP span status into span code.
 func statusCode(st *tracepb.Status) codes.Code {
+	if st == nil {
+		return codes.Unset
+	}
 	switch st.Code {
 	case tracepb.Status_STATUS_CODE_ERROR:
 		return codes.Error
@@ -186,6 +195,9 @@ func links(links []*tracepb.Span_Link) []tracesdk.Link {
 	sl := make([]tracesdk.Link, 0, len(links))
 
 	for _, otLink := range links {
+		if otLink == nil {
+			continue
+		}
 		// This redefinition is necessary to prevent otLink.*ID[:] copies
 		// being reused -- in short we need a new otLink per iteration.
 		otLink := otLink
@@ -226,6 +238,9 @@ func spanEvents(es []*tracepb.Span_Event) []tracesdk.Event {
 		if messageEvents >= maxMessageEventsPerSpan {
 			break
 		}
+		if e == nil {
+			continue
+		}
 		messageEvents++
 		events = append(events, tracesdk.Event{