From 408beed6455b648286e4873647a859fd150cdd94 Mon Sep 17 00:00:00 2001 From: Ali Ok Date: Wed, 7 Feb 2024 11:44:50 +0300 Subject: [PATCH] Show subscriptions to event types in Backstage (#24) * Build subscribers list Signed-off-by: Ali Ok * Embed the consumedBy info in eventTypes Signed-off-by: Ali Ok * Make the tests working Signed-off-by: Ali Ok * Get dynamic client from the context Signed-off-by: Ali Ok * go mod tidy && go mod vendor Signed-off-by: Ali Ok * Link event mesh plugin locally Signed-off-by: Ali Ok * Store the consumer relation in a custom annotation Signed-off-by: Ali Ok * Process the custom annotation to emit relations Signed-off-by: Ali Ok * Does not work - extended the ApiEntity and added a root level field Signed-off-by: Ali Ok * Put the consumer information in the metadata as is Signed-off-by: Ali Ok * Use Backstage Kube id annotation to fetch the consumer Signed-off-by: Ali Ok * Improve code, add logging Signed-off-by: Ali Ok * Add some test cases in the backend code Signed-off-by: Ali Ok * Add more test cases in the backend code Signed-off-by: Ali Ok * Use UnsafeGuessKindToResource Signed-off-by: Ali Ok * Add some docs Signed-off-by: Ali Ok * Rename `etNameMap` to `etByNamespacedName` Signed-off-by: Ali Ok * Simplify trigger processing Signed-off-by: Ali Ok * Add code comments Signed-off-by: Ali Ok * Update architecture diagrams Signed-off-by: Ali Ok * Address comments Signed-off-by: Ali Ok * Fix provider unit tests Signed-off-by: Ali Ok * Created separate issues for TODOs Signed-off-by: Ali Ok * Tests for the processor Signed-off-by: Ali Ok * Remove trailing whitespace Signed-off-by: Ali Ok --------- Signed-off-by: Ali Ok --- README.md | 136 +++++- .../config/100-eventmesh/100-clusterrole.yaml | 12 + .../pkg/reconciler/eventmesh/annotations.go | 2 + backends/pkg/reconciler/eventmesh/broker.go | 31 +- .../pkg/reconciler/eventmesh/controller.go | 5 +- .../pkg/reconciler/eventmesh/eventmesh.go | 1 + .../pkg/reconciler/eventmesh/eventtype.go | 40 +- backends/pkg/reconciler/eventmesh/handler.go | 191 +++++++- .../pkg/reconciler/eventmesh/handler_test.go | 415 +++++++++++++++++- backends/pkg/reconciler/eventmesh/util.go | 16 +- backstage/packages/backend/package.json | 2 +- .../packages/backend/src/plugins/catalog.ts | 13 +- .../src/providers/index.ts | 1 + .../knativeEventMeshProcessor.test.ts | 216 +++++++++ .../providers/knativeEventMeshProcessor.ts | 117 +++++ .../knativeEventMeshProvider.test.ts | 3 + .../src/providers/knativeEventMeshProvider.ts | 29 +- .../src/providers/types.ts | 5 + backstage/yarn.lock | 12 + go.mod | 2 +- .../v2/internal/httprule/BUILD.bazel | 35 ++ .../grpc-gateway/v2/runtime/BUILD.bazel | 97 ++++ .../grpc-gateway/v2/utilities/BUILD.bazel | 31 ++ .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 17 + .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 14 + .../apimachinery/pkg/api/resource/OWNERS | 11 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 16 + .../apimachinery/pkg/util/mergepatch/OWNERS | 6 + .../pkg/util/strategicpatch/OWNERS | 9 + .../third_party/forked/golang/json/OWNERS | 6 + vendor/k8s.io/client-go/openapi/OWNERS | 4 + .../pkg/apis/clientauthentication/OWNERS | 8 + vendor/k8s.io/client-go/rest/OWNERS | 14 + vendor/k8s.io/client-go/tools/auth/OWNERS | 8 + vendor/k8s.io/client-go/tools/cache/OWNERS | 28 ++ .../client-go/tools/leaderelection/OWNERS | 11 + vendor/k8s.io/client-go/tools/metrics/OWNERS | 5 + vendor/k8s.io/client-go/tools/record/OWNERS | 6 + vendor/k8s.io/client-go/transport/OWNERS | 8 + vendor/k8s.io/client-go/util/cert/OWNERS | 8 
+ vendor/k8s.io/client-go/util/keyutil/OWNERS | 6 + vendor/k8s.io/client-go/util/retry/OWNERS | 4 + vendor/k8s.io/code-generator/OWNERS | 15 + .../code-generator/cmd/client-gen/OWNERS | 10 + .../code-generator/cmd/go-to-protobuf/OWNERS | 6 + vendor/k8s.io/klog/OWNERS | 19 + vendor/k8s.io/klog/v2/OWNERS | 14 + .../kube-openapi/pkg/generators/rules/OWNERS | 4 + .../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 + vendor/k8s.io/utils/pointer/OWNERS | 10 + vendor/knative.dev/eventing/hack/OWNERS | 10 + .../apis/duck/v1beta1/channelable_types.go | 152 +++++++ .../apis/duck/v1beta1/delivery_conversion.go | 79 ++++ .../pkg/apis/duck/v1beta1/delivery_types.go | 118 +++++ .../eventing/pkg/apis/duck/v1beta1/doc.go | 24 + .../apis/duck/v1beta1/subscribable_types.go | 162 +++++++ .../v1beta1/subscribable_types_conversion.go | 200 +++++++++ .../duck/v1beta1/zz_generated.deepcopy.go | 349 +++++++++++++++ .../informers/eventing/v1/trigger/trigger.go | 52 +++ .../eventing/test/lib/resources/constants.go | 87 ++++ .../eventing/test/lib/resources/eventing.go | 324 ++++++++++++++ .../eventing/test/lib/resources/kube.go | 304 +++++++++++++ .../eventing/test/lib/resources/meta.go | 52 +++ .../eventing/test/lib/resources/serving.go | 138 ++++++ vendor/knative.dev/hack/OWNERS | 8 + vendor/knative.dev/hack/OWNERS_ALIASES | 183 ++++++++ vendor/knative.dev/pkg/apis/OWNERS | 15 + vendor/knative.dev/pkg/apis/duck/OWNERS | 8 + vendor/knative.dev/pkg/controller/OWNERS | 7 + vendor/knative.dev/pkg/hack/OWNERS | 10 + vendor/knative.dev/pkg/reconciler/OWNERS | 7 + vendor/knative.dev/pkg/resolver/OWNERS | 8 + vendor/knative.dev/pkg/test/OWNERS | 10 + vendor/knative.dev/pkg/test/README.md | 218 +++++++++ vendor/knative.dev/pkg/test/cleanup.go | 65 +++ vendor/knative.dev/pkg/test/clients.go | 98 +++++ vendor/knative.dev/pkg/test/crd.go | 95 ++++ vendor/knative.dev/pkg/test/e2e_flags.go | 81 ++++ .../pkg/test/environment/config.go | 60 +++ .../knative.dev/pkg/test/ingress/ingress.go | 92 ++++ vendor/knative.dev/pkg/test/kube_checks.go | 282 ++++++++++++ .../knative.dev/pkg/test/presubmit-tests.sh | 36 ++ vendor/knative.dev/pkg/test/request.go | 160 +++++++ .../pkg/test/spoof/error_checks.go | 70 +++ vendor/knative.dev/pkg/test/spoof/request.go | 38 ++ .../pkg/test/spoof/response_checks.go | 105 +++++ vendor/knative.dev/pkg/test/spoof/spoof.go | 360 +++++++++++++++ .../pkg/test/test-reconciler-codegen.sh | 63 +++ vendor/knative.dev/pkg/test/tinterface.go | 40 ++ vendor/knative.dev/pkg/webhook/OWNERS | 7 + vendor/modules.txt | 7 + vendor/sigs.k8s.io/json/OWNERS | 6 + vendor/sigs.k8s.io/yaml/OWNERS | 23 + vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS | 24 + 94 files changed, 5827 insertions(+), 91 deletions(-) create mode 100644 backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProcessor.test.ts create mode 100644 backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProcessor.ts create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS create mode 100644 vendor/k8s.io/client-go/openapi/OWNERS create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS create mode 100644 vendor/k8s.io/client-go/rest/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/auth/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/cache/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/record/OWNERS create mode 100644 vendor/k8s.io/client-go/transport/OWNERS create mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS create mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS create mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS create mode 100644 vendor/k8s.io/code-generator/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS create mode 100644 vendor/k8s.io/klog/OWNERS create mode 100644 vendor/k8s.io/klog/v2/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS create mode 100644 vendor/k8s.io/utils/pointer/OWNERS create mode 100644 vendor/knative.dev/eventing/hack/OWNERS create mode 100644 vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/channelable_types.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/delivery_conversion.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/delivery_types.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/doc.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/subscribable_types.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/subscribable_types_conversion.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/knative.dev/eventing/pkg/client/injection/informers/eventing/v1/trigger/trigger.go create mode 100644 vendor/knative.dev/eventing/test/lib/resources/constants.go create mode 100644 vendor/knative.dev/eventing/test/lib/resources/eventing.go create mode 100644 vendor/knative.dev/eventing/test/lib/resources/kube.go create mode 100644 vendor/knative.dev/eventing/test/lib/resources/meta.go create mode 100644 vendor/knative.dev/eventing/test/lib/resources/serving.go create mode 100644 vendor/knative.dev/hack/OWNERS create mode 100644 vendor/knative.dev/hack/OWNERS_ALIASES create mode 100644 vendor/knative.dev/pkg/apis/OWNERS create mode 100644 vendor/knative.dev/pkg/apis/duck/OWNERS create mode 100644 vendor/knative.dev/pkg/controller/OWNERS create mode 100644 vendor/knative.dev/pkg/hack/OWNERS create mode 100644 vendor/knative.dev/pkg/reconciler/OWNERS create mode 100644 vendor/knative.dev/pkg/resolver/OWNERS create mode 100644 vendor/knative.dev/pkg/test/OWNERS create mode 100644 vendor/knative.dev/pkg/test/README.md create mode 100644 vendor/knative.dev/pkg/test/cleanup.go create mode 100644 vendor/knative.dev/pkg/test/clients.go create mode 100644 vendor/knative.dev/pkg/test/crd.go create mode 100644 vendor/knative.dev/pkg/test/e2e_flags.go create mode 100644 vendor/knative.dev/pkg/test/environment/config.go create mode 100644 
vendor/knative.dev/pkg/test/ingress/ingress.go create mode 100644 vendor/knative.dev/pkg/test/kube_checks.go create mode 100644 vendor/knative.dev/pkg/test/presubmit-tests.sh create mode 100644 vendor/knative.dev/pkg/test/request.go create mode 100644 vendor/knative.dev/pkg/test/spoof/error_checks.go create mode 100644 vendor/knative.dev/pkg/test/spoof/request.go create mode 100644 vendor/knative.dev/pkg/test/spoof/response_checks.go create mode 100644 vendor/knative.dev/pkg/test/spoof/spoof.go create mode 100644 vendor/knative.dev/pkg/test/test-reconciler-codegen.sh create mode 100644 vendor/knative.dev/pkg/test/tinterface.go create mode 100644 vendor/knative.dev/pkg/webhook/OWNERS create mode 100644 vendor/sigs.k8s.io/json/OWNERS create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS diff --git a/README.md b/README.md index 845241f8..a979a140 100644 --- a/README.md +++ b/README.md @@ -14,30 +14,130 @@ See [Event Mesh plugin README file](./backstage/plugins/knative-event-mesh-backe The architecture of the plugin is as follows: ``` - Kubernetes Backstage -┌────────────────────┐ ┌─────────────────────┐ -│ │ │ │ -│ ┌───────────────┐ │ │ Plugin │ -│ │ │ │ │ ┌─────────────────┐ │ -│ │ Backend ◄──┼────┐ │ │ │ │ -│ │ │ │ │ │ │ ┌─────────────┐ │ │ -│ └───────┬───────┘ │ │ │ │ │ │ │ │ -│ │ │ └──────┼─┼─┤ Provider │ │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ │ │ └─────────────┘ │ │ -│ ┌───────▼───────┐ │ │ │ │ │ -│ │ │ │ │ └─────────────────┘ │ -│ │ API Server │ │ │ │ -│ │ │ │ └─────────────────────┘ -│ └───────────────┘ │ -│ │ -└────────────────────┘ + Kubernetes Backstage +┌────────────────────┐ ┌───────────────────────────────────────────────┐ +│ │ │ Plugin │ +│ ┌───────────────┐ │ │ ┌─────────────────┐ ┌───────────────┐ │ +│ │ │ │ │ │ │ │ │ │ +│ │ Backend ◄──┼────┐ │ │ ┌─────────────┐ │ │ │ │ +│ │ │ │ │ │ │ │ │ │ │ │ │ +│ └───────┬───────┘ │ └──────┼─┼─┤ Provider ├─┼────────► │ │ +│ │ │ │ │ │ │ │ │ │ │ +│ │ │ │ │ └─────────────┘ │ │ │ │ +│ │ │ │ │ │ │ │ │ +│ ┌───────▼───────┐ │ │ │ │ │ │ │ +│ │ │ │ │ │ ┌────────┼────────┤ Catalog │ │ +│ │ API Server │ │ │ │ │ │ │ │ │ +│ │ │ │ │ │ │ │ │ │ │ +│ └───────────────┘ │ │ │ ┌──────▼──────┐ │ │ │ │ +│ │ │ │ │ │ │ │ │ │ +└────────────────────┘ │ │ │ Processor ├─┼────────► │ │ + │ │ │ │ │ │ │ │ + │ │ └─────────────┘ │ │ │ │ + │ │ │ │ │ │ + │ └─────────────────┘ └───────────────┘ │ + └───────────────────────────────────────────────┘ ``` The plugin use providers (and possibly other mechanisms) to communicate with a special backend-for-frontend. This backend talks to the Kubernetes API server to get information about the resources in the cluster. +```mermaid +--- +title: Overall +--- +flowchart TD + Start --> FetchBrokers + FetchBrokers --> ProcessBrokers + ProcessBrokers --> FetchEventTypes + FetchEventTypes --> ProcessEventTypes + ProcessEventTypes --> FetchTriggers + FetchTriggers --> ProcessTriggers +``` + +## Processing the brokers + +```mermaid +--- +title: ProcessBrokers +--- +flowchart LR + GetNextBroker --> CreateDTO +``` + +## Processing the event types + +```mermaid +--- +title: ProcessEventTypes +--- +flowchart TD + GetEventType[Get next event type] + CheckRef{spec.ref exists?} + RefIsABrokerInTheBrokerMap{ref is a broker in the previously
built broker map?} + RegisterEventType[Add event type to broker DTO's `providedEventTypes` list] + DontRegisterEventType[Don't relate the event type to any broker] + Done[Done] + + GetEventType --> CheckRef + CheckRef --> |Yes| RefIsABrokerInTheBrokerMap + RefIsABrokerInTheBrokerMap --> |Yes| RegisterEventType + + CheckRef --> |No| DontRegisterEventType + RefIsABrokerInTheBrokerMap --> |No| DontRegisterEventType + RegisterEventType --> Done + DontRegisterEventType --> Done +``` + +## Processing the triggers + +```mermaid +--- +title: ProcessTriggers +--- +flowchart TD + GetTrigger[Get next trigger] + CheckSubscriberRef{spec.subscriber.ref
exists?} + FetchSubscriberRef[Fetch subscriber resource] + CheckSubscriberLabel{Subscriber has the
Backstage label?} + CheckEventType{Trigger has an
event type filter?} + RegisterSingleRelation[Register `ConsumedBy` relation
for the matching eventType and subscriber] + RegisterRelation[Register `ConsumedBy` relation
for eventType and subscriber] + + + Ignore[Ignore trigger] + + Done[Done] + + GetTrigger --> CheckSubscriberRef + CheckSubscriberRef --> |Yes| FetchSubscriberRef + FetchSubscriberRef --> CheckSubscriberLabel + CheckSubscriberLabel --> |Yes| CheckEventType + CheckEventType --> |Yes| RegisterSingleRelation + CheckEventType --> |No| FetchAllEventTypesForBroker + FetchAllEventTypesForBroker --> ForEachEventType --> RegisterRelation + + + RegisterSingleRelation --> Done + RegisterRelation --> Done + + + CheckSubscriberLabel --> |No| Ignore + CheckSubscriberRef --> |No| Ignore + + Ignore --> Done + + CheckSubscriberRef -.- CheckSubscriberRefNote["We can't collect subscriber information using the URL.
So, let's simply check the subscriber ref."] + CheckSubscriberLabel -.- CheckSubscriberLabelNote["The goal is to show which resources consume which event types.
However, Backstage will only show the resource if it has the `backstage.io/kubernetes-id` label.
So, if that label is missing, simply ignore the subscriber."] + CheckEventType -.- CheckEventTypeNote["If the trigger has an event type filter,
that means the subscriber is subscribed to that event type.
If not, the subscriber is subscribed to all event types that the trigger's broker provides.
Please note that we ignore other filtering mechanisms such as 'source'."] + + CheckSubscriberRefNote:::note + CheckSubscriberLabelNote:::note + CheckEventTypeNote:::note + classDef note fill:yellow +``` + #### Running the backend The backend is a Go project that runs in a Kubernetes cluster. diff --git a/backends/config/100-eventmesh/100-clusterrole.yaml b/backends/config/100-eventmesh/100-clusterrole.yaml index 3f889fed..4f0c587a 100644 --- a/backends/config/100-eventmesh/100-clusterrole.yaml +++ b/backends/config/100-eventmesh/100-clusterrole.yaml @@ -25,6 +25,7 @@ rules: resources: - brokers - eventtypes + - triggers verbs: - get - list @@ -43,3 +44,14 @@ rules: - delete - patch - watch + + + # permissions to get subscribers for triggers + # as subscribers can be any resource, we need to give access to all resources + # we fetch subscribers one by one, we only need `get` verb + - apiGroups: + - "*" + resources: + - "*" + verbs: + - get diff --git a/backends/pkg/reconciler/eventmesh/annotations.go b/backends/pkg/reconciler/eventmesh/annotations.go index 382cf46d..8040570d 100644 --- a/backends/pkg/reconciler/eventmesh/annotations.go +++ b/backends/pkg/reconciler/eventmesh/annotations.go @@ -4,6 +4,8 @@ var ExcludedAnnotations = map[string]struct{}{ "kubectl.kubernetes.io/last-applied-configuration": {}, } +// FilterAnnotations filters out annotations that are not interesting to provide to the Backstage plugin. +// Specifically, it filters out the annotations in ExcludedAnnotations. func FilterAnnotations(annotations map[string]string) map[string]string { if annotations == nil { return nil diff --git a/backends/pkg/reconciler/eventmesh/broker.go b/backends/pkg/reconciler/eventmesh/broker.go index 6213e0f4..553455d1 100644 --- a/backends/pkg/reconciler/eventmesh/broker.go +++ b/backends/pkg/reconciler/eventmesh/broker.go @@ -4,20 +4,35 @@ import ( eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" ) +// Broker is a simplified representation of a Knative Eventing Broker that is easier to consume by the Backstage plugin. type Broker struct { - Name string `json:"name"` - Namespace string `json:"namespace"` - UID string `json:"uid"` - Labels map[string]string `json:"labels,omitempty"` + // Namespace of the broker + Namespace string `json:"namespace"` + + // Name of the broker + Name string `json:"name"` + + // UID of the broker + UID string `json:"uid"` + + // Labels of the broker. These are passed as is. + Labels map[string]string `json:"labels,omitempty"` + + // Annotations of the broker. These are passed as is, except that are filtered out by the FilterAnnotations function. Annotations map[string]string `json:"annotations,omitempty"` - // + + // ProvidedEventTypes is a list of event types that the broker provides. + // This is a list of strings, where each string is a "/" of the event type. ProvidedEventTypes []string `json:"providedEventTypes,omitempty"` } -func (b Broker) GetNameAndNamespace() string { - return NameAndNamespace(b.Namespace, b.Name) +// GetNamespacedName returns the name and namespace of the broker in the format "/" +func (b Broker) GetNamespacedName() string { + return NamespacedName(b.Namespace, b.Name) } +// convertBroker converts a Knative Eventing Broker to a simplified representation that is easier to consume by the Backstage plugin. +// see Broker. 
func convertBroker(br *eventingv1.Broker) Broker { return Broker{ Name: br.Name, @@ -25,7 +40,7 @@ func convertBroker(br *eventingv1.Broker) Broker { UID: string(br.UID), Labels: br.Labels, Annotations: FilterAnnotations(br.Annotations), - // to be filled later + // this field will be populated later on, when we have the list of event types ProvidedEventTypes: []string{}, } } diff --git a/backends/pkg/reconciler/eventmesh/controller.go b/backends/pkg/reconciler/eventmesh/controller.go index 8e4adf63..6286e327 100644 --- a/backends/pkg/reconciler/eventmesh/controller.go +++ b/backends/pkg/reconciler/eventmesh/controller.go @@ -13,6 +13,7 @@ import ( eventtypereconciler "knative.dev/eventing/pkg/client/injection/reconciler/eventing/v1beta2/eventtype" brokerinformer "knative.dev/eventing/pkg/client/injection/informers/eventing/v1/broker" + triggerinformer "knative.dev/eventing/pkg/client/injection/informers/eventing/v1/trigger" eventtypeinformer "knative.dev/eventing/pkg/client/injection/informers/eventing/v1beta2/eventtype" eventinglistersv1 "knative.dev/eventing/pkg/client/listers/eventing/v1" @@ -22,6 +23,7 @@ import ( type Listers struct { EventTypeLister eventinglistersv1beta2.EventTypeLister BrokerLister eventinglistersv1.BrokerLister + TriggerLister eventinglistersv1.TriggerLister } func NewController(ctx context.Context) *controller.Impl { @@ -40,6 +42,7 @@ func NewController(ctx context.Context) *controller.Impl { listers := Listers{ EventTypeLister: eventtypeinformer.Get(ctx).Lister(), BrokerLister: brokerinformer.Get(ctx).Lister(), + TriggerLister: triggerinformer.Get(ctx).Lister(), } go startWebServer(ctx, listers) @@ -56,7 +59,7 @@ func startWebServer(ctx context.Context, listers Listers) { r := mux.NewRouter() r.Use(commonMiddleware) - r.HandleFunc("/", EventMeshHandler(ctx, listers)).Methods("GET") + r.HandleFunc("/", HttpHandler(ctx, listers)).Methods("GET") http.Handle("/", r) log.Fatal(http.ListenAndServe(":8080", r)) diff --git a/backends/pkg/reconciler/eventmesh/eventmesh.go b/backends/pkg/reconciler/eventmesh/eventmesh.go index 4d60e7d1..55ab1aa3 100644 --- a/backends/pkg/reconciler/eventmesh/eventmesh.go +++ b/backends/pkg/reconciler/eventmesh/eventmesh.go @@ -8,6 +8,7 @@ import ( eventingv1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2" ) +// Reconciler is a stub reconciler for getting the informers injected by sharedmain. type Reconciler struct { } diff --git a/backends/pkg/reconciler/eventmesh/eventtype.go b/backends/pkg/reconciler/eventmesh/eventtype.go index 3b5cf533..488cf35f 100644 --- a/backends/pkg/reconciler/eventmesh/eventtype.go +++ b/backends/pkg/reconciler/eventmesh/eventtype.go @@ -4,23 +4,37 @@ import ( "knative.dev/eventing/pkg/apis/eventing/v1beta2" ) +// EventType is a simplified representation of a Knative Eventing EventType that is easier to consume by the Backstage plugin. type EventType struct { - Name string `json:"name"` - Namespace string `json:"namespace"` - Type string `json:"type"` - UID string `json:"uid"` - Description string `json:"description,omitempty"` - SchemaData string `json:"schemaData,omitempty"` - SchemaURL string `json:"schemaURL,omitempty"` - Labels map[string]string `json:"labels,omitempty"` + Namespace string `json:"namespace"` + Name string `json:"name"` + Type string `json:"type"` + UID string `json:"uid"` + Description string `json:"description,omitempty"` + SchemaData string `json:"schemaData,omitempty"` + SchemaURL string `json:"schemaURL,omitempty"` + // Labels of the event type. These are passed as is. 
+ Labels map[string]string `json:"labels,omitempty"` + // Annotations of the event type. These are passed as is, except that are filtered out by the FilterAnnotations function. Annotations map[string]string `json:"annotations,omitempty"` - Reference string `json:"reference,omitempty"` + // Reference is the ET's reference to a resource like a broker or a channel. It is in the format "/". + Reference string `json:"reference,omitempty"` + // ConsumedBy is a list of the consumers of the event type. + ConsumedBy []string `json:"consumedBy,omitempty"` } -func (et EventType) NameAndNamespace() string { - return NameAndNamespace(et.Namespace, et.Name) +// NamespacedName returns the name and namespace of the event type in the format "/" +func (et EventType) NamespacedName() string { + return NamespacedName(et.Namespace, et.Name) } +// NamespacedType returns the type and namespace of the event type in the format "/" +func (et EventType) NamespacedType() string { + return NamespacedName(et.Namespace, et.Type) +} + +// convertEventType converts a Knative Eventing EventType to a simplified representation that is easier to consume by the Backstage plugin. +// see EventType. func convertEventType(et *v1beta2.EventType) EventType { return EventType{ Name: et.Name, @@ -32,6 +46,8 @@ func convertEventType(et *v1beta2.EventType) EventType { SchemaURL: et.Spec.Schema.String(), Labels: et.Labels, Annotations: FilterAnnotations(et.Annotations), - Reference: RefNameAndNamespace(et.Spec.Reference), + Reference: NamespacedRefName(et.Spec.Reference), + // this field will be populated later on, when we have process the triggers + ConsumedBy: make([]string, 0), } } diff --git a/backends/pkg/reconciler/eventmesh/handler.go b/backends/pkg/reconciler/eventmesh/handler.go index f43a2b83..899bb760 100644 --- a/backends/pkg/reconciler/eventmesh/handler.go +++ b/backends/pkg/reconciler/eventmesh/handler.go @@ -2,9 +2,20 @@ package eventmesh import ( "context" + "fmt" "net/http" "sort" + "k8s.io/apimachinery/pkg/api/meta" + duckv1 "knative.dev/pkg/apis/duck/v1" + + "knative.dev/pkg/injection/clients/dynamicclient" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" eventinglistersv1beta2 "knative.dev/eventing/pkg/client/listers/eventing/v1beta2" "go.uber.org/zap" @@ -17,21 +28,33 @@ import ( eventinglistersv1 "knative.dev/eventing/pkg/client/listers/eventing/v1" ) +// EventMesh is the top-level struct that holds the event mesh data. +// It's the struct that's serialized and sent to the Backstage plugin. type EventMesh struct { - // not every event type is tied to a broker. thus, we need to send event types as well. + // EventTypes is a list of all event types in the cluster. + // While we can embed the event types in the brokers, we keep them separate because + // not every event type is tied to a broker. EventTypes []*EventType `json:"eventTypes"` - Brokers []*Broker `json:"brokers"` + + // Brokers is a list of all brokers in the cluster. + Brokers []*Broker `json:"brokers"` } -type EventTypeMap = map[string]*EventType +// BackstageKubernetesIDLabel is the label that's used to identify Backstage resources. +// In Backstage Kubernetes plugin, a Backstage entity (e.g. a service) is tied to a Kubernetes resource +// using this label. +// see Backstage Kubernetes plugin for more details. 
+const BackstageKubernetesIDLabel = "backstage.io/kubernetes-id" -func EventMeshHandler(ctx context.Context, listers Listers) func(w http.ResponseWriter, req *http.Request) { +// HttpHandler is the HTTP handler that's used to serve the event mesh data. +func HttpHandler(ctx context.Context, listers Listers) func(w http.ResponseWriter, req *http.Request) { logger := logging.FromContext(ctx) + // this handler simply calls the event mesh builder and returns the result as JSON return func(w http.ResponseWriter, req *http.Request) { logger.Debugw("Handling request", "method", req.Method, "url", req.URL) - eventMesh, err := BuildEventMesh(listers, logger) + eventMesh, err := BuildEventMesh(ctx, listers, logger) if err != nil { logger.Errorw("Error building event mesh", "error", err) http.Error(w, err.Error(), http.StatusInternalServerError) @@ -47,32 +70,69 @@ func EventMeshHandler(ctx context.Context, listers Listers) func(w http.Response } } -func BuildEventMesh(listers Listers, logger *zap.SugaredLogger) (EventMesh, error) { +// BuildEventMesh builds the event mesh data by fetching and converting the Kubernetes resources. +// The procedure is as follows: +// - Fetch the brokers and convert them to the representation that's consumed by the Backstage plugin. +// - Do the same for event types. +// - Fetch the triggers, find out what event types they're subscribed to and find out the resources that are receiving the events. +// - Make a connection between the event types and the subscribers. Store this connection in the eventType struct. +func BuildEventMesh(ctx context.Context, listers Listers, logger *zap.SugaredLogger) (EventMesh, error) { + // fetch the brokers and convert them to the representation that's consumed by the Backstage plugin. convertedBrokers, err := fetchBrokers(listers.BrokerLister, logger) if err != nil { logger.Errorw("Error fetching and converting brokers", "error", err) return EventMesh{}, err } + // build a map for easier access. + // we need this map to register the event types in the brokers when we are processing the event types. + // map key: "/" brokerMap := make(map[string]*Broker) for _, cbr := range convertedBrokers { - brokerMap[cbr.GetNameAndNamespace()] = cbr + brokerMap[cbr.GetNamespacedName()] = cbr } + // fetch the event types and convert them to the representation that's consumed by the Backstage plugin. convertedEventTypes, err := fetchEventTypes(listers.EventTypeLister, logger) if err != nil { logger.Errorw("Error fetching and converting event types", "error", err) return EventMesh{}, err } + // register the event types in the brokers for _, et := range convertedEventTypes { if et.Reference != "" { if br, ok := brokerMap[et.Reference]; ok { - br.ProvidedEventTypes = append(br.ProvidedEventTypes, et.NameAndNamespace()) + br.ProvidedEventTypes = append(br.ProvidedEventTypes, et.NamespacedName()) } } } + // fetch the triggers we will process them later + triggers, err := listers.TriggerLister.List(labels.Everything()) + if err != nil { + logger.Errorw("Error listing triggers", "error", err) + return EventMesh{}, err + } + + // build a map for easier access to the ETs by their namespaced name. + // we need this map when processing the triggers to find out ET definitions for the ET references + // brokers provide. 
+ // map key: "/" + etByNamespacedName := make(map[string]*EventType) + for _, et := range convertedEventTypes { + etByNamespacedName[et.NamespacedName()] = et + } + + for _, trigger := range triggers { + err := processTrigger(ctx, trigger, brokerMap, etByNamespacedName, logger) + if err != nil { + logger.Errorw("Error processing trigger", "error", err) + // do not stop the Backstage plugin from rendering the rest of the data, e.g. because + // there are no permissions to get a single subscriber resource + } + } + eventMesh := EventMesh{ EventTypes: convertedEventTypes, Brokers: convertedBrokers, @@ -81,6 +141,99 @@ func BuildEventMesh(listers Listers, logger *zap.SugaredLogger) (EventMesh, erro return eventMesh, nil } +// processTrigger processes the trigger and updates the ETs that the trigger is subscribed to. +// The consumedBy fields of ETs are updated with the subscriber's Backstage ID. +func processTrigger(ctx context.Context, trigger *eventingv1.Trigger, brokerMap map[string]*Broker, etByNamespacedName map[string]*EventType, logger *zap.SugaredLogger) error { + // if the trigger has no subscriber, we can skip it, there's no relation to show on Backstage side + if trigger.Spec.Subscriber.Ref == nil { + logger.Debugw("Trigger has no subscriber ref; cannot process this trigger", "namespace", trigger.Namespace, "trigger", trigger.Name) + return nil + } + + dynamicClient := dynamicclient.Get(ctx) + subscriberBackstageId, err := getSubscriberBackstageId(ctx, dynamicClient, trigger.Spec.Subscriber.Ref, logger) + if err != nil { + // wrap the error to provide more context + return fmt.Errorf("error getting subscriber backstage id: %w", err) + } + + // we only care about subscribers that are in Backstage + if len(subscriberBackstageId) == 0 { + logger.Debugw("Subscriber has no backstage id", "namespace", trigger.Namespace, "trigger", trigger.Name) + return nil + } + + // if the trigger's broker is not set or if we haven't processed the broker, we can skip the trigger + if trigger.Spec.Broker == "" { + logger.Errorw("Trigger has no broker", "namespace", trigger.Namespace, "trigger", trigger.Name) + return nil + } + brokerRef := NamespacedName(trigger.Namespace, trigger.Spec.Broker) + if _, ok := brokerMap[brokerRef]; !ok { + logger.Infow("Broker not found", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", trigger.Spec.Broker) + return nil + } + + eventTypes := collectSubscribedEventTypes(trigger, brokerMap[brokerRef], etByNamespacedName, logger) + logger.Debugw("Collected subscribed event types", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", trigger.Spec.Broker, "eventTypes", eventTypes) + + for _, eventType := range eventTypes { + eventType.ConsumedBy = append(eventType.ConsumedBy, subscriberBackstageId) + } + + return nil +} + +// collectSubscribedEventTypes collects the event types that the trigger is subscribed to. +// It does it by checking the trigger's filter and finding out the ET types that the filter is interested in. +// Later on, it finds the ETs that the broker provides and returns the ones matches the type. +// If the trigger has no filter, it returns all the ETs that the broker provides. 
+func collectSubscribedEventTypes(trigger *eventingv1.Trigger, broker *Broker, etByNamespacedName map[string]*EventType, logger *zap.SugaredLogger) []*EventType { + logger.Debugw("Collecting subscribed event types", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", broker.Name) + + // TODO: we don't handle the CESQL yet + if trigger.Spec.Filter != nil && len(trigger.Spec.Filter.Attributes) > 0 { + logger.Debugw("Trigger has filter", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", broker.Name, "filter", trigger.Spec.Filter.Attributes) + + // check if "type" attribute is present + if subscribedEventType, ok := trigger.Spec.Filter.Attributes["type"]; ok { + logger.Debugw("Trigger has type filter", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", broker.Name, "type", subscribedEventType) + + // it can be present but empty + // in that case, we assume the trigger is subscribed to all event types + if subscribedEventType != eventingv1.TriggerAnyFilter { + logger.Debugw("Trigger has non-empty type filter", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", broker.Name, "type", subscribedEventType) + + // if type is present and not empty, that means the trigger is subscribed to a ETs of that type + // find the ETs for that type + subscribedEventTypes := make([]*EventType, 0) + for _, etNamespacedName := range broker.ProvidedEventTypes { + if et, ok := etByNamespacedName[etNamespacedName]; ok { + if et.Type == subscribedEventType { + subscribedEventTypes = append(subscribedEventTypes, et) + } + } + } + logger.Debugw("Found subscribed event types", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", broker.Name, "subscribedEventTypes", subscribedEventTypes) + return subscribedEventTypes + } + } + } + + logger.Debugw("Trigger has no filter or type, returning all event types the broker provides", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", broker.Name) + // if no filter or type is specified, we assume the resource is interested in all event types that the broker provides + subscribedEventTypes := make([]*EventType, 0, len(broker.ProvidedEventTypes)) + for _, eventType := range broker.ProvidedEventTypes { + if et, ok := etByNamespacedName[eventType]; ok { + subscribedEventTypes = append(subscribedEventTypes, et) + } + } + + logger.Debugw("Found event types", "namespace", trigger.Namespace, "trigger", trigger.Name, "broker", broker.Name, "eventTypes", subscribedEventTypes) + return subscribedEventTypes +} + +// fetchBrokers fetches the brokers and converts them to the representation that's consumed by the Backstage plugin. func fetchBrokers(brokerLister eventinglistersv1.BrokerLister, logger *zap.SugaredLogger) ([]*Broker, error) { fetchedBrokers, err := brokerLister.List(labels.Everything()) if err != nil { @@ -96,6 +249,7 @@ func fetchBrokers(brokerLister eventinglistersv1.BrokerLister, logger *zap.Sugar return convertedBrokers, err } +// fetchEventTypes fetches the event types and converts them to the representation that's consumed by the Backstage plugin. 
func fetchEventTypes(eventTypeLister eventinglistersv1beta2.EventTypeLister, logger *zap.SugaredLogger) ([]*EventType, error) { fetchedEventTypes, err := eventTypeLister.List(labels.Everything()) if err != nil { @@ -118,3 +272,24 @@ func fetchEventTypes(eventTypeLister eventinglistersv1beta2.EventTypeLister, log return convertedEventTypes, err } + +// getSubscriberBackstageId fetches the subscriber resource and returns the Backstage ID if it's present. +func getSubscriberBackstageId(ctx context.Context, client dynamic.Interface, subRef *duckv1.KReference, logger *zap.SugaredLogger) (string, error) { + refGvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(subRef.APIVersion, subRef.Kind)) + + resource, err := client.Resource(refGvr).Namespace(subRef.Namespace).Get(ctx, subRef.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + logger.Debugw("Subscriber resource not found", "resource", subRef.Name) + return "", nil + } + if err != nil { + logger.Errorw("Error fetching resource", "error", err) + return "", err + } + + // check if the resource has the Backstage label + if backstageId, ok := resource.GetLabels()[BackstageKubernetesIDLabel]; ok { + return backstageId, nil + } + return "", nil +} diff --git a/backends/pkg/reconciler/eventmesh/handler_test.go b/backends/pkg/reconciler/eventmesh/handler_test.go index 6c30c20b..d12abdbf 100644 --- a/backends/pkg/reconciler/eventmesh/handler_test.go +++ b/backends/pkg/reconciler/eventmesh/handler_test.go @@ -1,9 +1,14 @@ package eventmesh import ( + "context" "testing" + "k8s.io/client-go/dynamic/fake" + "knative.dev/pkg/injection/clients/dynamicclient" + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "knative.dev/pkg/apis" @@ -20,18 +25,22 @@ import ( testingv1 "knative.dev/eventing/pkg/reconciler/testing/v1" testingv1beta2 "knative.dev/eventing/pkg/reconciler/testing/v1beta2" + + corev1 "k8s.io/api/core/v1" ) func TestBuildEventMesh(t *testing.T) { tests := []struct { - name string - brokers []*eventingv1.Broker - eventTypes []*eventingv1beta2.EventType - want EventMesh - error bool + name string + brokers []*eventingv1.Broker + eventTypes []*eventingv1beta2.EventType + triggers []*eventingv1.Trigger + extraObjects []runtime.Object + want EventMesh + error bool }{ { - name: "With 1 broker and 1 type", + name: "With 1 broker, 1 type, 1 trigger", brokers: []*eventingv1.Broker{ testingv1.NewBroker("test-broker", "test-ns", // following fields are not used in any logic and simply returned @@ -56,6 +65,29 @@ func TestBuildEventMesh(t *testing.T) { WithEventTypeAnnotations(map[string]string{"test-eventtype-annotation": "foo"}), ), }, + triggers: []*eventingv1.Trigger{ + testingv1.NewTrigger("test-trigger", "test-ns", "test-broker", + testingv1.WithTriggerSubscriberRef( + metav1.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }, + "test-subscriber", + "test-ns", + ), + WithEventTypeFilter("test-eventtype-type"), + ), + }, + extraObjects: []runtime.Object{ + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-subscriber", + Namespace: "test-ns", + Labels: map[string]string{"backstage.io/kubernetes-id": "test-subscriber"}, + }, + }, + }, want: EventMesh{ Brokers: []*Broker{ { @@ -78,12 +110,42 @@ func TestBuildEventMesh(t *testing.T) { Labels: map[string]string{"test-eventtype-label": "foo"}, Annotations: map[string]string{"test-eventtype-annotation": "foo"}, Reference: "test-ns/test-broker", + 
ConsumedBy: []string{"test-subscriber"}, + }, + }, + }, + }, + { + name: "With 1 broker, 1 type, no triggers", + brokers: []*eventingv1.Broker{ + testingv1.NewBroker("test-broker", "test-ns"), + }, + eventTypes: []*eventingv1beta2.EventType{ + testingv1beta2.NewEventType("test-eventtype", "test-ns", + testingv1beta2.WithEventTypeType("test-eventtype-type"), + testingv1beta2.WithEventTypeReference(brokerReference("test-broker", "test-ns")), + ), + }, + want: EventMesh{ + Brokers: []*Broker{ + { + Name: "test-broker", + Namespace: "test-ns", + ProvidedEventTypes: []string{"test-ns/test-eventtype"}}, + }, + EventTypes: []*EventType{ + { + Name: "test-eventtype", + Namespace: "test-ns", + Type: "test-eventtype-type", + Reference: "test-ns/test-broker", + ConsumedBy: []string{}, }, }, }, }, { - name: "With 1 broker and 2 eventtypes with different spec.types", + name: "With 1 broker and 2 eventtypes with different spec.types, no triggers", brokers: []*eventingv1.Broker{ testingv1.NewBroker("test-broker", "test-ns"), }, @@ -105,22 +167,24 @@ func TestBuildEventMesh(t *testing.T) { }, EventTypes: []*EventType{ { - Name: "test-eventtype-1", - Namespace: "test-ns", - Type: "test-eventtype-type-1", - Reference: "test-ns/test-broker", + Name: "test-eventtype-1", + Namespace: "test-ns", + Type: "test-eventtype-type-1", + Reference: "test-ns/test-broker", + ConsumedBy: []string{}, }, { - Name: "test-eventtype-2", - Namespace: "test-ns", - Type: "test-eventtype-type-2", - Reference: "", + Name: "test-eventtype-2", + Namespace: "test-ns", + Type: "test-eventtype-type-2", + Reference: "", + ConsumedBy: []string{}, }, }, }, }, { - name: "With 2 brokers and 2 eventtypes with same spec.types", + name: "With 2 brokers and 2 eventtypes with same spec.types, no triggers", brokers: []*eventingv1.Broker{ testingv1.NewBroker("test-broker-1", "test-ns"), testingv1.NewBroker("test-broker-2", "test-ns"), @@ -150,16 +214,297 @@ func TestBuildEventMesh(t *testing.T) { }, EventTypes: []*EventType{ { - Name: "test-eventtype-1", + Name: "test-eventtype-1", + Namespace: "test-ns", + Type: "test-eventtype-type", + Reference: "test-ns/test-broker-1", + ConsumedBy: []string{}, + }, + { + Name: "test-eventtype-2", + Namespace: "test-ns", + Type: "test-eventtype-type", + Reference: "test-ns/test-broker-2", + ConsumedBy: []string{}, + }, + }, + }, + }, + { + name: "Ignore triggers that are not bound to a broker that exists", + brokers: []*eventingv1.Broker{ + testingv1.NewBroker("test-broker", "test-ns"), + }, + eventTypes: []*eventingv1beta2.EventType{ + testingv1beta2.NewEventType("test-eventtype", "test-ns", + testingv1beta2.WithEventTypeType("test-eventtype-type"), + testingv1beta2.WithEventTypeReference(brokerReference("test-broker", "test-ns")), + ), + }, + triggers: []*eventingv1.Trigger{ + testingv1.NewTrigger("test-trigger", "test-ns", "UNKNOWN-BROKER", + testingv1.WithTriggerSubscriberRef( + metav1.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }, + "test-subscriber", + "test-ns", + ), + WithEventTypeFilter("test-eventtype-type"), + ), + }, + extraObjects: []runtime.Object{ + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-subscriber", Namespace: "test-ns", - Type: "test-eventtype-type", - Reference: "test-ns/test-broker-1", + Labels: map[string]string{"backstage.io/kubernetes-id": "test-subscriber"}, }, + }, + }, + want: EventMesh{ + Brokers: []*Broker{ + { + Name: "test-broker", + Namespace: "test-ns", + ProvidedEventTypes: []string{"test-ns/test-eventtype"}}, + }, + EventTypes: 
[]*EventType{ { - Name: "test-eventtype-2", + Name: "test-eventtype", + Namespace: "test-ns", + Type: "test-eventtype-type", + Reference: "test-ns/test-broker", + ConsumedBy: []string{}, + }, + }, + }, + }, + { + name: "Ignore triggers that are not bound to a subscriber that exists", + brokers: []*eventingv1.Broker{ + testingv1.NewBroker("test-broker", "test-ns"), + }, + eventTypes: []*eventingv1beta2.EventType{ + testingv1beta2.NewEventType("test-eventtype", "test-ns", + testingv1beta2.WithEventTypeType("test-eventtype-type"), + testingv1beta2.WithEventTypeReference(brokerReference("test-broker", "test-ns")), + ), + }, + triggers: []*eventingv1.Trigger{ + testingv1.NewTrigger("test-trigger", "test-ns", "test-broker", + testingv1.WithTriggerSubscriberRef( + metav1.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }, + "test-subscriber", + "test-ns", + ), + WithEventTypeFilter("test-eventtype-type"), + ), + }, + extraObjects: []runtime.Object{}, + want: EventMesh{ + Brokers: []*Broker{ + { + Name: "test-broker", + Namespace: "test-ns", + ProvidedEventTypes: []string{"test-ns/test-eventtype"}}, + }, + EventTypes: []*EventType{ + { + Name: "test-eventtype", + Namespace: "test-ns", + Type: "test-eventtype-type", + Reference: "test-ns/test-broker", + ConsumedBy: []string{}, + }, + }, + }, + }, + { + name: "Ignore triggers that are bound to a subscriber that is not registered on Backstage", + brokers: []*eventingv1.Broker{ + testingv1.NewBroker("test-broker", "test-ns"), + }, + eventTypes: []*eventingv1beta2.EventType{ + testingv1beta2.NewEventType("test-eventtype", "test-ns", + testingv1beta2.WithEventTypeType("test-eventtype-type"), + testingv1beta2.WithEventTypeReference(brokerReference("test-broker", "test-ns")), + ), + }, + triggers: []*eventingv1.Trigger{ + testingv1.NewTrigger("test-trigger", "test-ns", "test-broker", + testingv1.WithTriggerSubscriberRef( + metav1.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }, + "test-subscriber", + "test-ns", + ), + WithEventTypeFilter("test-eventtype-type"), + ), + }, + extraObjects: []runtime.Object{ + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-subscriber", + Namespace: "test-ns", + Labels: map[string]string{"NO": "NO"}, + }, + }, + }, + want: EventMesh{ + Brokers: []*Broker{ + { + Name: "test-broker", + Namespace: "test-ns", + ProvidedEventTypes: []string{"test-ns/test-eventtype"}}, + }, + EventTypes: []*EventType{ + { + Name: "test-eventtype", + Namespace: "test-ns", + Type: "test-eventtype-type", + Reference: "test-ns/test-broker", + ConsumedBy: []string{}, + }, + }, + }, + }, + { + name: "Trigger with no filter subscribes to all event types provided by the broker", + brokers: []*eventingv1.Broker{ + testingv1.NewBroker("test-broker", "test-ns"), + }, + eventTypes: []*eventingv1beta2.EventType{ + testingv1beta2.NewEventType("test-eventtype-1", "test-ns", + testingv1beta2.WithEventTypeType("test-eventtype-type-1"), + testingv1beta2.WithEventTypeReference(brokerReference("test-broker", "test-ns")), + ), + testingv1beta2.NewEventType("test-eventtype-2", "test-ns", + testingv1beta2.WithEventTypeType("test-eventtype-type-2"), + testingv1beta2.WithEventTypeReference(brokerReference("test-broker", "test-ns")), + ), + }, + triggers: []*eventingv1.Trigger{ + testingv1.NewTrigger("test-trigger", "test-ns", "test-broker", + testingv1.WithTriggerSubscriberRef( + metav1.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }, + "test-subscriber", + "test-ns", + ), + ), + }, + 
extraObjects: []runtime.Object{ + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-subscriber", + Namespace: "test-ns", + Labels: map[string]string{"backstage.io/kubernetes-id": "test-subscriber"}, + }, + }, + }, + want: EventMesh{ + Brokers: []*Broker{ + { + Name: "test-broker", Namespace: "test-ns", - Type: "test-eventtype-type", - Reference: "test-ns/test-broker-2", + ProvidedEventTypes: []string{ + "test-ns/test-eventtype-1", + "test-ns/test-eventtype-2", + }}, + }, + EventTypes: []*EventType{ + { + Name: "test-eventtype-1", + Namespace: "test-ns", + Type: "test-eventtype-type-1", + Reference: "test-ns/test-broker", + ConsumedBy: []string{"test-subscriber"}, + }, + { + Name: "test-eventtype-2", + Namespace: "test-ns", + Type: "test-eventtype-type-2", + Reference: "test-ns/test-broker", + ConsumedBy: []string{"test-subscriber"}, + }, + }, + }, + }, + { + name: "Trigger has an accept all types filter subscribes to all event types provided by the broker", + brokers: []*eventingv1.Broker{ + testingv1.NewBroker("test-broker", "test-ns"), + }, + eventTypes: []*eventingv1beta2.EventType{ + testingv1beta2.NewEventType("test-eventtype-1", "test-ns", + testingv1beta2.WithEventTypeType("test-eventtype-type-1"), + testingv1beta2.WithEventTypeReference(brokerReference("test-broker", "test-ns")), + ), + testingv1beta2.NewEventType("test-eventtype-2", "test-ns", + testingv1beta2.WithEventTypeType("test-eventtype-type-2"), + testingv1beta2.WithEventTypeReference(brokerReference("test-broker", "test-ns")), + ), + }, + triggers: []*eventingv1.Trigger{ + testingv1.NewTrigger("test-trigger", "test-ns", "test-broker", + testingv1.WithTriggerSubscriberRef( + metav1.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }, + "test-subscriber", + "test-ns", + ), + WithEventTypeFilter(""), + ), + }, + extraObjects: []runtime.Object{ + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-subscriber", + Namespace: "test-ns", + Labels: map[string]string{"backstage.io/kubernetes-id": "test-subscriber"}, + }, + }, + }, + want: EventMesh{ + Brokers: []*Broker{ + { + Name: "test-broker", + Namespace: "test-ns", + ProvidedEventTypes: []string{ + "test-ns/test-eventtype-1", + "test-ns/test-eventtype-2", + }}, + }, + EventTypes: []*EventType{ + { + Name: "test-eventtype-1", + Namespace: "test-ns", + Type: "test-eventtype-type-1", + Reference: "test-ns/test-broker", + ConsumedBy: []string{"test-subscriber"}, + }, + { + Name: "test-eventtype-2", + Namespace: "test-ns", + Type: "test-eventtype-type-2", + Reference: "test-ns/test-broker", + ConsumedBy: []string{"test-subscriber"}, }, }, }, @@ -177,15 +522,27 @@ func TestBuildEventMesh(t *testing.T) { for _, b := range tt.brokers { v1objects = append(v1objects, b) } + for _, t := range tt.triggers { + v1objects = append(v1objects, t) + } fakelistersv1 := reconcilertestingv1.NewListers(v1objects) + sc := runtime.NewScheme() + _ = corev1.AddToScheme(sc) + + fakeDynamicClient := fake.NewSimpleDynamicClient(sc, tt.extraObjects...) 
+ + ctx := context.TODO() + ctx = context.WithValue(ctx, dynamicclient.Key{}, fakeDynamicClient) + listers := Listers{ BrokerLister: fakelistersv1.GetBrokerLister(), EventTypeLister: fakelistersv1beta2.GetEventTypeLister(), + TriggerLister: fakelistersv1.GetTriggerLister(), } t.Run(tt.name, func(t *testing.T) { - got, err := BuildEventMesh(listers, logger) + got, err := BuildEventMesh(ctx, listers, logger) if (err != nil) != tt.error { t.Errorf("BuildEventMesh() error = %v, error %v", err, tt.error) return @@ -198,6 +555,18 @@ func TestBuildEventMesh(t *testing.T) { } } +func WithEventTypeFilter(et string) testingv1.TriggerOption { + return func(a *eventingv1.Trigger) { + if a.Spec.Filter == nil { + a.Spec.Filter = &eventingv1.TriggerFilter{} + } + if a.Spec.Filter.Attributes == nil { + a.Spec.Filter.Attributes = make(map[string]string) + } + a.Spec.Filter.Attributes["type"] = et + } +} + func brokerReference(brokerName, namespace string) *duckv1.KReference { return &duckv1.KReference{ APIVersion: "eventing.knative.dev/v1", diff --git a/backends/pkg/reconciler/eventmesh/util.go b/backends/pkg/reconciler/eventmesh/util.go index 0f90694c..ddfef881 100644 --- a/backends/pkg/reconciler/eventmesh/util.go +++ b/backends/pkg/reconciler/eventmesh/util.go @@ -3,22 +3,20 @@ package eventmesh import ( "fmt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "knative.dev/pkg/apis/duck/v1" ) -func ObjNameAndNamespace(obj metav1.ObjectMetaAccessor) string { - return NameAndNamespace(obj.GetObjectMeta().GetNamespace(), obj.GetObjectMeta().GetName()) -} - -func RefNameAndNamespace(ref *v1.KReference) string { +// NamespacedRefName returns the namespaced name of the given reference. +// If the reference is nil, it returns an empty string. +// It returns the namespaced name in the format "namespace/name". +func NamespacedRefName(ref *v1.KReference) string { if ref == nil { return "" } - return NameAndNamespace(ref.Namespace, ref.Name) + return NamespacedName(ref.Namespace, ref.Name) } -func NameAndNamespace(namespace, name string) string { +// NamespacedName returns the namespaced name in the format "namespace/name". 
+func NamespacedName(namespace, name string) string { return fmt.Sprintf("%s/%s", namespace, name) } diff --git a/backstage/packages/backend/package.json b/backstage/packages/backend/package.json index 935a797b..e657370b 100644 --- a/backstage/packages/backend/package.json +++ b/backstage/packages/backend/package.json @@ -36,7 +36,7 @@ "@backstage/plugin-search-backend-module-techdocs": "^0.1.11", "@backstage/plugin-search-backend-node": "^1.2.11", "@backstage/plugin-techdocs-backend": "^1.9.0", - "@knative-extensions/plugin-knative-event-mesh-backend": "0.0.0-nightly", + "@knative-extensions/plugin-knative-event-mesh-backend": "link:../../plugins/knative-event-mesh-backend", "app": "link:../app", "better-sqlite3": "^9.0.0", "dockerode": "^3.3.1", diff --git a/backstage/packages/backend/src/plugins/catalog.ts b/backstage/packages/backend/src/plugins/catalog.ts index 198b8f2c..1cdb46a3 100644 --- a/backstage/packages/backend/src/plugins/catalog.ts +++ b/backstage/packages/backend/src/plugins/catalog.ts @@ -1,8 +1,12 @@ +import {CatalogClient} from "@backstage/catalog-client"; import { CatalogBuilder } from '@backstage/plugin-catalog-backend'; import { ScaffolderEntitiesProcessor } from '@backstage/plugin-catalog-backend-module-scaffolder-entity-model'; import { Router } from 'express'; import { PluginEnvironment } from '../types'; -import { KnativeEventMeshProvider } from '@knative-extensions/plugin-knative-event-mesh-backend'; +import { + KnativeEventMeshProcessor, + KnativeEventMeshProvider +} from '@knative-extensions/plugin-knative-event-mesh-backend'; export default async function createPlugin( env: PluginEnvironment, @@ -16,6 +20,13 @@ export default async function createPlugin( }); builder.addEntityProvider(knativeEventMeshProviders); + const catalogApi = new CatalogClient({ + discoveryApi: env.discovery, + }); + + const knativeEventMeshProcessor = new KnativeEventMeshProcessor(catalogApi, env.logger); + builder.addProcessor(knativeEventMeshProcessor); + const { processingEngine, router } = await builder.build(); await processingEngine.start(); diff --git a/backstage/plugins/knative-event-mesh-backend/src/providers/index.ts b/backstage/plugins/knative-event-mesh-backend/src/providers/index.ts index cf50add7..54c56c9c 100644 --- a/backstage/plugins/knative-event-mesh-backend/src/providers/index.ts +++ b/backstage/plugins/knative-event-mesh-backend/src/providers/index.ts @@ -1 +1,2 @@ export { KnativeEventMeshProvider } from './knativeEventMeshProvider'; +export {KnativeEventMeshProcessor} from './knativeEventMeshProcessor'; diff --git a/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProcessor.test.ts b/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProcessor.test.ts new file mode 100644 index 00000000..ae7ab4a8 --- /dev/null +++ b/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProcessor.test.ts @@ -0,0 +1,216 @@ +import {getVoidLogger} from '@backstage/backend-common'; +import {CatalogClient} from "@backstage/catalog-client"; +import {ApiEntity, Entity} from "@backstage/catalog-model"; +import {CatalogProcessorRelationResult} from "@backstage/plugin-catalog-node"; +import {KnativeEventMeshProcessor} from "./knativeEventMeshProcessor"; + +// there must be a better way to do this +const catalogApi = { + queryEntities: jest.fn(), +} as jest.Mocked; + +beforeEach(() => { + catalogApi.queryEntities.mockClear(); +}); + +describe('KnativeEventMeshProcessor', () => { + const logger = getVoidLogger(); + + 
describe('preProcessEntity', () => { + + const processor = new KnativeEventMeshProcessor(catalogApi, logger); + + type TestCase = { + name:string; + entity:ApiEntity; + query?:{ + queryEntitiesRequest:{ + filter:{ + kind:'component', + 'metadata.namespace':string, + 'metadata.annotations.backstage.io/kubernetes-id':string, + }, + }, + queryEntitiesResult:Entity[]; + }; + expectedRelations?:CatalogProcessorRelationResult[]; + }; + + const testCases:TestCase[] = [ + { + name: 'should emit relations if consumer is defined and found', + entity: { + apiVersion: 'backstage.io/v1alpha1', + kind: 'API', + metadata: { + namespace: 'default', + name: 'et-1', + consumedBy: ['consumer-1'], + }, + spec: { + owner: 'owner', + system: 'system', + lifecycle: 'lifecycle', + definition: 'definition', + type: 'eventType', + }, + }, + query: { + queryEntitiesRequest: { + filter: { + kind: 'component', + 'metadata.namespace': 'default', + 'metadata.annotations.backstage.io/kubernetes-id': 'consumer-1', + }, + }, + queryEntitiesResult: [{ + apiVersion: 'backstage.io/v1alpha1', + kind: 'component', + metadata: { + namespace: 'default', + name: 'consumer-1', + }, + }], + }, + expectedRelations: [ + { + type: 'relation', + relation: { + type: 'apiConsumedBy', + source: { + kind: 'API', + namespace: 'default', + name: 'et-1', + }, + target: { + kind: 'Component', + namespace: 'default', + name: 'consumer-1', + }, + }, + }, + { + type: 'relation', + relation: { + type: 'consumesApi', + source: { + kind: 'Component', + namespace: 'default', + name: 'consumer-1', + }, + target: { + kind: 'API', + namespace: 'default', + name: 'et-1', + }, + }, + }, + ], + }, + { + "name": "should not emit relations if entity is not Knative Event Type", + entity: { + apiVersion: 'backstage.io/v1alpha1', + kind: 'API', + metadata: { + namespace: 'default', + name: 'et-1', + }, + spec: { + owner: 'owner', + system: 'system', + lifecycle: 'lifecycle', + definition: 'definition', + type: 'RANDOM', + }, + }, + }, + { + "name": "should not emit relations if there's no consumer defined", + entity: { + apiVersion: 'backstage.io/v1alpha1', + kind: 'API', + metadata: { + namespace: 'default', + name: 'et-1', + consumedBy: [], + }, + spec: { + owner: 'owner', + system: 'system', + lifecycle: 'lifecycle', + definition: 'definition', + type: 'eventType', + }, + }, + }, + { + name: 'should not emit relations if consumer is defined but cannot be found', + entity: { + apiVersion: 'backstage.io/v1alpha1', + kind: 'API', + metadata: { + namespace: 'default', + name: 'et-1', + consumedBy: ['consumer-1'], + }, + spec: { + owner: 'owner', + system: 'system', + lifecycle: 'lifecycle', + definition: 'definition', + type: 'eventType', + }, + }, + query: { + queryEntitiesRequest: { + filter: { + kind: 'component', + 'metadata.namespace': 'default', + 'metadata.annotations.backstage.io/kubernetes-id': 'consumer-1', + }, + }, + queryEntitiesResult: [], + }, + expectedRelations: [], + }, + ]; + + for (const testCase of testCases) { + it(testCase.name, async () => { + if (testCase.query) { + let entityQueryResult = { + items: testCase.query.queryEntitiesResult, + totalItems: testCase.query.queryEntitiesResult.length, + pageInfo: {} + }; + + catalogApi.queryEntities.mockReturnValue(Promise.resolve(entityQueryResult)); + } + + const emitFn = jest.fn(); + + let output = await processor.preProcessEntity(testCase.entity, {}, emitFn, {}, {}); + + if (!testCase.expectedRelations) { + expect(emitFn).not.toHaveBeenCalled(); + } else { + 
expect(emitFn).toHaveBeenCalledTimes(testCase.expectedRelations.length); + for (let i = 0; i < testCase.expectedRelations.length; i++) { + const relation = testCase.expectedRelations[i]; + expect(emitFn).toHaveBeenNthCalledWith(i + 1, relation); + } + } + + expect(output).toEqual(testCase.entity); + + if (testCase.query) { + expect(catalogApi.queryEntities).toHaveBeenCalledTimes(1); + expect(catalogApi.queryEntities).toHaveBeenCalledWith(testCase.query.queryEntitiesRequest); + } else { + expect(catalogApi.queryEntities).not.toHaveBeenCalled(); + } + }); + } + }); +}); diff --git a/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProcessor.ts b/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProcessor.ts new file mode 100644 index 00000000..21c80fdf --- /dev/null +++ b/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProcessor.ts @@ -0,0 +1,117 @@ +import {CatalogClient} from '@backstage/catalog-client'; +import {ComponentEntity, Entity} from '@backstage/catalog-model'; +import {LocationSpec} from '@backstage/plugin-catalog-common'; +import { + CatalogProcessor, + CatalogProcessorCache, + CatalogProcessorEmit, + CatalogProcessorRelationResult, +} from '@backstage/plugin-catalog-node'; +import {Logger} from "winston"; +import {TypeKnativeEvent} from "./types"; + + +export class KnativeEventMeshProcessor implements CatalogProcessor { + private readonly catalogApi:CatalogClient; + private readonly logger:Logger; + + constructor(catalogApi:CatalogClient, logger:Logger) { + this.catalogApi = catalogApi; + + this.logger = logger.child({ + target: this.getProcessorName(), + }); + } + + getProcessorName():string { + return "knative-event-mesh-processor"; + } + + async preProcessEntity(entity:Entity, _location:LocationSpec, emit:CatalogProcessorEmit, _originLocation:LocationSpec, _cache:CatalogProcessorCache):Promise { + if (entity.kind === 'API' && entity.spec?.type === TypeKnativeEvent) { + this.logger.debug(`Processing KnativeEventType entity ${entity.metadata.namespace}/${entity.metadata.name}`); + + // if there's no relation to build, return entity as is + if (!entity.metadata.consumedBy) { + this.logger.debug(`No consumers defined for KnativeEventType entity ${entity.metadata.namespace}/${entity.metadata.name}`); + return entity; + } + + const consumers = entity.metadata.consumedBy as string[]; + this.logger.debug(`Consumers defined for KnativeEventType entity ${entity.metadata.namespace}/${entity.metadata.name}: ${consumers.join(', ')}`); + + // build relations + for (const consumedBy of consumers) { + this.logger.debug(`Building relations for KnativeEventType entity ${entity.metadata.namespace}/${entity.metadata.name} to consumer ${consumedBy}`); + + // query the catalog for the component with the id + const consumerComponents = await this.findComponentsByBackstageId(entity.metadata.namespace as string, consumedBy); + this.logger.debug(`Found ${consumerComponents.length} components for KnativeEventType entity ${entity.metadata.namespace}/${entity.metadata.name} to consumer ${consumedBy}`); + + for (const component of consumerComponents) { + this.logger.debug(`Emitting relations for KnativeEventType entity ${entity.metadata.namespace}/${entity.metadata.name} for consumer ${consumedBy} via component ${component.metadata.namespace}/${component.metadata.name}`); + + // emit a relation from the API to the component + const apiToComponentRelation:CatalogProcessorRelationResult = { + type: 'relation', + relation: { + 
type: 'apiConsumedBy', + source: { + kind: 'API', + namespace: entity.metadata.namespace as string, + name: entity.metadata.name, + }, + target: { + kind: 'Component', + namespace: component.metadata.namespace as string, + name: component.metadata.name, + }, + }, + }; + emit(apiToComponentRelation); + + // emit a relation from the component to the API + const componentToApiRelation:CatalogProcessorRelationResult = { + type: 'relation', + relation: { + type: 'consumesApi', + source: { + kind: 'Component', + namespace: component.metadata.namespace as string, + name: component.metadata.name, + }, + target: { + kind: 'API', + namespace: entity.metadata.namespace as string, + name: entity.metadata.name, + }, + }, + }; + emit(componentToApiRelation); + } + } + } + return entity; + } + + private async findComponentsByBackstageId(namespace:string, componentId:string) { + // fetch the component by the id + // example: http://localhost:7007/api/catalog/entities/by-query + // ?filter=kind=component,metadata.namespace=default,metadata.annotations.backstage.io/kubernetes-id=fraud-detector + + try { + let response = await this.catalogApi.queryEntities({ + filter: { + kind: 'component', + 'metadata.namespace': namespace, + 'metadata.annotations.backstage.io/kubernetes-id': componentId, + }, + }); + + return response.items as ComponentEntity[]; + } catch (e) { + this.logger.error(`Failed to find components by backstage id ${namespace}/${componentId}: ${e}`); + return [] as ComponentEntity[]; + } + } +} diff --git a/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProvider.test.ts b/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProvider.test.ts index 4a2a1bb7..35d3f954 100644 --- a/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProvider.test.ts +++ b/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProvider.test.ts @@ -48,6 +48,7 @@ describe('KnativeEventMeshProvider', () => { links: [], uid: undefined, etag: undefined, + consumedBy: [], }, spec: { type: 'eventType', @@ -74,6 +75,7 @@ describe('KnativeEventMeshProvider', () => { }, schemaData: 'test-schema-data', schemaURL: 'http://test-schema-url', + consumedBy: ["test-consumer1", "test-consumer2"], }, expected: { apiVersion: 'backstage.io/v1alpha1', @@ -101,6 +103,7 @@ describe('KnativeEventMeshProvider', () => { ], uid: undefined, etag: undefined, + consumedBy: ["test-consumer1", "test-consumer2"], }, spec: { type: 'eventType', diff --git a/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProvider.ts b/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProvider.ts index a9e8bf50..c54c1f3a 100644 --- a/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProvider.ts +++ b/backstage/plugins/knative-event-mesh-backend/src/providers/knativeEventMeshProvider.ts @@ -14,7 +14,13 @@ import {EntityProvider, EntityProviderConnection,} from '@backstage/plugin-catal import {Logger} from 'winston'; import {readKnativeEventMeshProviderConfigs} from "./config"; -import {KnativeEventMeshProviderConfig} from "./types"; +import { + KnativeEventMeshProviderConfig, + OwnerKnative, + SystemKnative, + TypeKnativeBroker, + TypeKnativeEvent +} from "./types"; export type EventType = { name:string; @@ -26,6 +32,7 @@ export type EventType = { schemaURL?:string; labels?:Record; annotations?:Record; + consumedBy?:string[]; }; export type Broker = { @@ -139,7 +146,7 @@ export class 
KnativeEventMeshProvider implements EntityProvider { } getProviderName():string { - return `knative-event-mesh-${this.env}`; + return `knative-event-mesh-provider-${this.env}`; } async connect(connection:EntityProviderConnection):Promise { @@ -207,13 +214,17 @@ export class KnativeEventMeshProvider implements EntityProvider { // we don't use tags tags: [], links: links, - title: `${eventType.type} - (${eventType.namespace}/${eventType.name})` + title: `${eventType.type} - (${eventType.namespace}/${eventType.name})`, + // custom field, stored + // see https://backstage.io/docs/features/software-catalog/extending-the-model#adding-new-fields-to-the-metadata-object + // can't make it type safe as the Metadata type is not exported + consumedBy: eventType.consumedBy ?? [], }, spec: { - type: 'eventType', + type: TypeKnativeEvent, lifecycle: this.env, - system: 'knative-event-mesh', - owner: 'knative', + system: SystemKnative, + owner: OwnerKnative, definition: eventType.schemaData || "{}", }, }; @@ -235,10 +246,10 @@ export class KnativeEventMeshProvider implements EntityProvider { tags: [], }, spec: { - type: 'broker', + type: TypeKnativeBroker, lifecycle: this.env, - system: 'knative-event-mesh', - owner: 'knative', + system: SystemKnative, + owner: OwnerKnative, providesApis: !broker.providedEventTypes ? [] : broker.providedEventTypes.map((eventType:string) => `api:${eventType}`), } } diff --git a/backstage/plugins/knative-event-mesh-backend/src/providers/types.ts b/backstage/plugins/knative-event-mesh-backend/src/providers/types.ts index 216e1dd6..13ffabe3 100644 --- a/backstage/plugins/knative-event-mesh-backend/src/providers/types.ts +++ b/backstage/plugins/knative-event-mesh-backend/src/providers/types.ts @@ -5,3 +5,8 @@ export type KnativeEventMeshProviderConfig = { baseUrl:string; schedule?:TaskScheduleDefinition; }; + +export const TypeKnativeEvent = 'eventType'; +export const TypeKnativeBroker = 'broker'; +export const SystemKnative = 'knative-event-mesh'; +export const OwnerKnative = 'knative'; diff --git a/backstage/yarn.lock b/backstage/yarn.lock index 03702f01..275254b0 100644 --- a/backstage/yarn.lock +++ b/backstage/yarn.lock @@ -4683,6 +4683,18 @@ dependencies: ioredis "^5.3.2" +"@knative-extensions/plugin-knative-event-mesh-backend@link:plugins/knative-event-mesh-backend": + version "0.0.0-nightly" + dependencies: + "@backstage/backend-common" "^0.19.9" + "@backstage/config" "^1.1.1" + "@types/express" "*" + express "^4.17.1" + express-promise-router "^4.1.0" + node-fetch "^2.6.7" + winston "^3.2.1" + yn "^4.0.0" + "@kubernetes/client-node@0.19.0": version "0.19.0" resolved "https://registry.yarnpkg.com/@kubernetes/client-node/-/client-node-0.19.0.tgz#ebd2121e5c8dc1a47ff1b2574bda1e760d0abb82" diff --git a/go.mod b/go.mod index 52e19174..dbbeb5ef 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.18 require ( github.com/google/go-cmp v0.6.0 github.com/gorilla/mux v1.8.1 + github.com/hashicorp/golang-lru v1.0.2 go.uber.org/zap v1.26.0 k8s.io/apimachinery v0.27.6 knative.dev/eventing v0.39.1 @@ -44,7 +45,6 @@ require ( github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3 // indirect github.com/google/uuid v1.4.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/imdario/mergo v0.3.9 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel new file mode 100644 index 00000000..f694f3c0 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "httprule", + srcs = [ + "compile.go", + "parse.go", + "types.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", + deps = ["//utilities"], +) + +go_test( + name = "httprule_test", + size = "small", + srcs = [ + "compile_test.go", + "parse_test.go", + "types_test.go", + ], + embed = [":httprule"], + deps = [ + "//utilities", + "@com_github_golang_glog//:glog", + ], +) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel new file mode 100644 index 00000000..b5140a3c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -0,0 +1,97 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "runtime", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", + deps = [ + "//internal/httprule", + "//utilities", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +go_test( + name = "runtime_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", + "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], + deps = [ + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@go_googleapis//google/rpc:status_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + 
"@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel new file mode 100644 index 00000000..b8940946 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "utilities", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +) + +go_test( + name = "utilities_test", + size = "small", + srcs = [ + "string_array_flag_test.go", + "trie_test.go", + ], + deps = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS new file mode 100644 index 00000000..155648ac --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -0,0 +1,17 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - lavalamp + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - saad-ali + - janetkuo + - tallclair + - dims + - cjcullen diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS new file mode 100644 index 00000000..1e1330ff --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -0,0 +1,14 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - ncdc + - dims diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS new file mode 100644 index 00000000..d1c9f530 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - lavalamp + - smarterclayton + - wojtek-t + - derekwaynecarr + - mikedanese + - saad-ali + - janetkuo diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS new file mode 100644 index 00000000..e7e5c152 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt + - sttts + - luxas + - janetkuo + - justinsb + - ncdc + - soltysh + - dims diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 00000000..349bc69d --- /dev/null +++ 
b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pwittrock +reviewers: + - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 00000000..73244449 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse + - pwittrock +reviewers: + - apelisse +emeritus_approvers: + - mengqiy diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 00000000..349bc69d --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pwittrock +reviewers: + - apelisse diff --git a/vendor/k8s.io/client-go/openapi/OWNERS b/vendor/k8s.io/client-go/openapi/OWNERS new file mode 100644 index 00000000..e6100942 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS new file mode 100644 index 00000000..4dfbb98a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# approval on api packages bubbles to api-approvers +reviewers: + - sig-auth-authenticators-approvers + - sig-auth-authenticators-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS new file mode 100644 index 00000000..7b23294c --- /dev/null +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -0,0 +1,14 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - smarterclayton + - caesarxuchao + - wojtek-t + - deads2k + - liggitt + - sttts + - luxas + - dims + - cjcullen + - lojies diff --git a/vendor/k8s.io/client-go/tools/auth/OWNERS b/vendor/k8s.io/client-go/tools/auth/OWNERS new file mode 100644 index 00000000..c4ea6463 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/auth/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-auth-authenticators-approvers +reviewers: + - sig-auth-authenticators-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS new file mode 100644 index 00000000..726205b3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -0,0 +1,28 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - thockin + - lavalamp + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt + - ncdc +reviewers: + - thockin + - lavalamp + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - justinsb + - soltysh + - jsafrane + - dims + - ingvagabund + - ncdc diff --git a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS new file mode 100644 index 00000000..908bdacd --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - mikedanese 
+reviewers: + - wojtek-t + - deads2k + - mikedanese + - ingvagabund +emeritus_approvers: + - timothysc diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS new file mode 100644 index 00000000..2c9488a5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - wojtek-t + - jayunit100 diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100644 index 00000000..8105c4fe --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - sig-instrumentation-reviewers +approvers: + - sig-instrumentation-approvers diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS new file mode 100644 index 00000000..34adee5e --- /dev/null +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - smarterclayton + - wojtek-t + - deads2k + - liggitt + - caesarxuchao diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS new file mode 100644 index 00000000..3c3b94c5 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-auth-certificates-approvers +reviewers: + - sig-auth-certificates-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS new file mode 100644 index 00000000..e6d229d5 --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS @@ -0,0 +1,6 @@ +approvers: + - sig-auth-certificates-approvers +reviewers: + - sig-auth-certificates-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS new file mode 100644 index 00000000..75736b5a --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - caesarxuchao diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS new file mode 100644 index 00000000..05162820 --- /dev/null +++ b/vendor/k8s.io/code-generator/OWNERS @@ -0,0 +1,15 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - deads2k + - lavalamp + - wojtek-t + - sttts +reviewers: + - deads2k + - lavalamp + - wojtek-t + - sttts +labels: + - sig/api-machinery + - area/code-generation diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS new file mode 100644 index 00000000..0170a84e --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - lavalamp + - wojtek-t + - caesarxuchao +reviewers: + - lavalamp + - wojtek-t + - caesarxuchao diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS new file mode 100644 index 00000000..af7e2ec4 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - smarterclayton +reviewers: + - smarterclayton diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS new file mode 100644 index 00000000..380e514f --- 
/dev/null +++ b/vendor/k8s.io/klog/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber +approvers: + - dims + - thockin + - justinsb + - tallclair + - piosz + - brancz + - DirectXMan12 + - lavalamp diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS new file mode 100644 index 00000000..a2fe8f35 --- /dev/null +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -0,0 +1,14 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - harshanarayana + - pohly +approvers: + - dims + - thockin + - serathius +emeritus_approvers: + - brancz + - justinsb + - lavalamp + - piosz + - tallclair diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS new file mode 100644 index 00000000..235bc545 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS @@ -0,0 +1,4 @@ +reviewers: +- roycaihw +approvers: +- roycaihw diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS new file mode 100644 index 00000000..9621a6a3 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS @@ -0,0 +1,2 @@ +approvers: +- apelisse diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 00000000..0d639275 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/knative.dev/eventing/hack/OWNERS b/vendor/knative.dev/eventing/hack/OWNERS new file mode 100644 index 00000000..65aa9e7b --- /dev/null +++ b/vendor/knative.dev/eventing/hack/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- productivity-writers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/channelable_types.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/channelable_types.go new file mode 100644 index 00000000..a87ec65d --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/channelable_types.go @@ -0,0 +1,152 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +// +genduck +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Channelable is a skeleton type wrapping Subscribable and Addressable in the manner we expect resource writers +// defining compatible resources to embed it. 
We will typically use this type to deserialize +// Channelable ObjectReferences and access their subscription and address data. This is not a real resource. +type Channelable struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec is the part where the Channelable fulfills the Subscribable contract. + Spec ChannelableSpec `json:"spec,omitempty"` + + Status ChannelableStatus `json:"status,omitempty"` +} + +// ChannelableSpec contains Spec of the Channelable object +type ChannelableSpec struct { + SubscribableSpec `json:",inline"` + + // DeliverySpec contains options controlling the event delivery + // +optional + Delivery *DeliverySpec `json:"delivery,omitempty"` +} + +// ChannelableStatus contains the Status of a Channelable object. +type ChannelableStatus struct { + // inherits duck/v1 Status, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller. + // * Conditions - the latest available observations of a resource's current state. + duckv1.Status `json:",inline"` + // AddressStatus is the part where the Channelable fulfills the Addressable contract. + duckv1.AddressStatus `json:",inline"` + // Subscribers is populated with the statuses of each of the Channelable's subscribers. + SubscribableStatus `json:",inline"` + // DeadLetterChannel is a KReference and is set by the channel when it supports native error handling via a channel + // Failed messages are delivered here. + // +optional + DeadLetterChannel *duckv1.KReference `json:"deadLetterChannel,omitempty"` +} + +var ( + // Verify Channelable resources meet duck contracts. + _ duck.Populatable = (*Channelable)(nil) + _ duck.Implementable = (*Channelable)(nil) + _ apis.Listable = (*Channelable)(nil) +) + +// Populate implements duck.Populatable +func (c *Channelable) Populate() { + c.Spec.SubscribableSpec = SubscribableSpec{ + // Populate ALL fields + Subscribers: []SubscriberSpec{{ + UID: "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1", + Generation: 1, + SubscriberURI: apis.HTTP("call1"), + ReplyURI: apis.HTTP("sink2"), + }, { + UID: "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1", + Generation: 2, + SubscriberURI: apis.HTTP("call2"), + ReplyURI: apis.HTTP("sink2"), + }}, + } + retry := int32(5) + linear := BackoffPolicyLinear + delay := "5s" + c.Spec.Delivery = &DeliverySpec{ + DeadLetterSink: &duckv1.Destination{ + Ref: &duckv1.KReference{ + Name: "aname", + }, + URI: &apis.URL{ + Scheme: "http", + Host: "test-error-domain", + }, + }, + Retry: &retry, + BackoffPolicy: &linear, + BackoffDelay: &delay, + } + c.Status = ChannelableStatus{ + AddressStatus: duckv1.AddressStatus{ + Address: &duckv1.Addressable{ + URL: &apis.URL{ + Scheme: "http", + Host: "test-domain", + }, + }, + }, + SubscribableStatus: SubscribableStatus{ + Subscribers: []SubscriberStatus{{ + UID: "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1", + ObservedGeneration: 1, + Ready: corev1.ConditionTrue, + Message: "Some message", + }, { + UID: "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1", + ObservedGeneration: 2, + Ready: corev1.ConditionFalse, + Message: "Some message", + }}, + }, + } +} + +// GetFullType implements duck.Implementable +func (s *Channelable) GetFullType() duck.Populatable { + return &Channelable{} +} + +// GetListType implements apis.Listable +func (c *Channelable) GetListType() runtime.Object { + return &ChannelableList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ChannelableList is a list of Channelable resources. 
+type ChannelableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Channelable `json:"items"` +} diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/delivery_conversion.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/delivery_conversion.go new file mode 100644 index 00000000..b6ec9033 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/delivery_conversion.go @@ -0,0 +1,79 @@ +/* +Copyright 2020 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" + + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" +) + +// ConvertTo implements apis.Convertible +func (source *DeliverySpec) ConvertTo(ctx context.Context, to apis.Convertible) error { + switch sink := to.(type) { + case *eventingduckv1.DeliverySpec: + sink.Retry = source.Retry + sink.BackoffDelay = source.BackoffDelay + sink.Timeout = source.Timeout + if source.BackoffPolicy != nil { + if *source.BackoffPolicy == BackoffPolicyLinear { + linear := eventingduckv1.BackoffPolicyLinear + sink.BackoffPolicy = &linear + } else if *source.BackoffPolicy == BackoffPolicyExponential { + exponential := eventingduckv1.BackoffPolicyExponential + sink.BackoffPolicy = &exponential + } else { + return fmt.Errorf("unknown BackoffPolicy, got: %q", *source.BackoffPolicy) + } + } + sink.DeadLetterSink = source.DeadLetterSink + return nil + default: + return fmt.Errorf("unknown version, got: %T", sink) + } +} + +// ConvertFrom implements apis.Convertible +func (sink *DeliverySpec) ConvertFrom(ctx context.Context, from apis.Convertible) error { + switch source := from.(type) { + case *eventingduckv1.DeliverySpec: + sink.Retry = source.Retry + sink.BackoffDelay = source.BackoffDelay + sink.Timeout = source.Timeout + if source.BackoffPolicy != nil { + if *source.BackoffPolicy == eventingduckv1.BackoffPolicyLinear { + linear := BackoffPolicyLinear + sink.BackoffPolicy = &linear + } else if *source.BackoffPolicy == eventingduckv1.BackoffPolicyExponential { + exponential := BackoffPolicyExponential + sink.BackoffPolicy = &exponential + } else { + return fmt.Errorf("unknown BackoffPolicy, got: %q", *source.BackoffPolicy) + } + + } + sink.DeadLetterSink = source.DeadLetterSink + return nil + default: + return fmt.Errorf("unknown version, got: %T", source) + } +} + +// DeliveryStatus v1beta1 is not convertable to v1 (Channel ref type vs URL) diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/delivery_types.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/delivery_types.go new file mode 100644 index 00000000..33ebd3ae --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/delivery_types.go @@ -0,0 +1,118 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + + "github.com/rickb777/date/period" +) + +// DeliverySpec contains the delivery options for event senders, +// such as channelable and source. +type DeliverySpec struct { + // DeadLetterSink is the sink receiving event that could not be sent to + // a destination. + // +optional + DeadLetterSink *duckv1.Destination `json:"deadLetterSink,omitempty"` + + // Retry is the minimum number of retries the sender should attempt when + // sending an event before moving it to the dead letter sink. + // +optional + Retry *int32 `json:"retry,omitempty"` + + // Timeout is the timeout of each single request. + // More information on Duration format: + // - https://www.iso.org/iso-8601-date-and-time-format.html + // - https://en.wikipedia.org/wiki/ISO_8601 + // + Timeout *string `json:"timeout,omitempty"` + + // BackoffPolicy is the retry backoff policy (linear, exponential). + // +optional + BackoffPolicy *BackoffPolicyType `json:"backoffPolicy,omitempty"` + + // BackoffDelay is the delay before retrying. + // More information on Duration format: + // - https://www.iso.org/iso-8601-date-and-time-format.html + // - https://en.wikipedia.org/wiki/ISO_8601 + // + // For linear policy, backoff delay is backoffDelay*. + // For exponential policy, backoff delay is backoffDelay*2^. + // +optional + BackoffDelay *string `json:"backoffDelay,omitempty"` +} + +func (ds *DeliverySpec) Validate(ctx context.Context) *apis.FieldError { + if ds == nil { + return nil + } + var errs *apis.FieldError + if dlse := ds.DeadLetterSink.Validate(ctx); dlse != nil { + errs = errs.Also(dlse).ViaField("deadLetterSink") + } + + if ds.Retry != nil && *ds.Retry < 0 { + errs = errs.Also(apis.ErrInvalidValue(*ds.Retry, "retry")) + } + + if ds.Timeout != nil { + _, te := period.Parse(*ds.Timeout) + if te != nil { + errs = errs.Also(apis.ErrInvalidValue(*ds.Timeout, "timeout")) + } + } + + if ds.BackoffPolicy != nil { + switch *ds.BackoffPolicy { + case BackoffPolicyExponential, BackoffPolicyLinear: + // nothing + default: + errs = errs.Also(apis.ErrInvalidValue(*ds.BackoffPolicy, "backoffPolicy")) + } + } + + if ds.BackoffDelay != nil { + _, te := period.Parse(*ds.BackoffDelay) + if te != nil { + errs = errs.Also(apis.ErrInvalidValue(*ds.BackoffDelay, "backoffDelay")) + } + } + return errs +} + +// BackoffPolicyType is the type for backoff policies +type BackoffPolicyType string + +const ( + // Linear backoff policy + BackoffPolicyLinear BackoffPolicyType = "linear" + + // Exponential backoff policy + BackoffPolicyExponential BackoffPolicyType = "exponential" +) + +// DeliveryStatus contains the Status of an object supporting delivery options. +type DeliveryStatus struct { + // DeadLetterChannel is a KReference that is the reference to the native, platform specific channel + // where failed events are sent to. 
+ // +optional + DeadLetterChannel *duckv1.KReference `json:"deadLetterChannel,omitempty"` +} diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/doc.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/doc.go new file mode 100644 index 00000000..08ba5444 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Api versions allow the api contract for a resource to be changed while keeping +// backward compatibility by support multiple concurrent versions +// of the same resource + +// Package v1beta1 is the v1beta1 version of the API. +// +k8s:deepcopy-gen=package +// +groupName=duck.knative.dev +package v1beta1 diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/subscribable_types.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/subscribable_types.go new file mode 100644 index 00000000..db7f9fa6 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/subscribable_types.go @@ -0,0 +1,162 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" +) + +// +genduck + +var _ duck.Implementable = (*Subscribable)(nil) + +// SubscriberSpec defines a single subscriber to a Subscribable. +// +// At least one of SubscriberURI and ReplyURI must be present +type SubscriberSpec struct { + // UID is used to understand the origin of the subscriber. + // +optional + UID types.UID `json:"uid,omitempty"` + // Generation of the origin of the subscriber with uid:UID. + // +optional + Generation int64 `json:"generation,omitempty"` + // SubscriberURI is the endpoint for the subscriber + // +optional + SubscriberURI *apis.URL `json:"subscriberUri,omitempty"` + // ReplyURI is the endpoint for the reply + // +optional + ReplyURI *apis.URL `json:"replyUri,omitempty"` + // +optional + // DeliverySpec contains options controlling the event delivery + // +optional + Delivery *DeliverySpec `json:"delivery,omitempty"` +} + +// SubscriberStatus defines the status of a single subscriber to a Channel. +type SubscriberStatus struct { + // UID is used to understand the origin of the subscriber. + // +optional + UID types.UID `json:"uid,omitempty"` + // Generation of the origin of the subscriber with uid:UID. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // Status of the subscriber. + Ready corev1.ConditionStatus `json:"ready,omitempty"` + // A human readable message indicating details of Ready status. + // +optional + Message string `json:"message,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Subscribable is a skeleton type wrapping Subscribable in the manner we expect resource writers +// defining compatible resources to embed it. We will typically use this type to deserialize +// SubscribableType ObjectReferences and access the Subscription data. This is not a real resource. +type Subscribable struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // SubscribableSpec is the part where Subscribable object is + // configured as to be compatible with Subscribable contract. + Spec SubscribableSpec `json:"spec"` + + // SubscribableStatus is the part where SubscribableStatus object is + // configured as to be compatible with Subscribable contract. + Status SubscribableStatus `json:"status"` +} + +// SubscribableSpec shows how we expect folks to embed Subscribable in their Spec field. +type SubscribableSpec struct { + // This is the list of subscriptions for this subscribable. + // +patchMergeKey=uid + // +patchStrategy=merge + Subscribers []SubscriberSpec `json:"subscribers,omitempty" patchStrategy:"merge" patchMergeKey:"uid"` +} + +// SubscribableStatus is the schema for the subscribable's status portion of the status +// section of the resource. +type SubscribableStatus struct { + // This is the list of subscription's statuses for this channel. + // +patchMergeKey=uid + // +patchStrategy=merge + Subscribers []SubscriberStatus `json:"subscribers,omitempty" patchStrategy:"merge" patchMergeKey:"uid"` +} + +var ( + // Verify SubscribableType resources meet duck contracts. 
+ _ duck.Populatable = (*Subscribable)(nil) + _ apis.Listable = (*Subscribable)(nil) + + _ apis.Convertible = (*Subscribable)(nil) + _ apis.Convertible = (*SubscribableSpec)(nil) + _ apis.Convertible = (*SubscribableStatus)(nil) + + _ apis.Convertible = (*SubscriberSpec)(nil) + _ apis.Convertible = (*SubscriberStatus)(nil) +) + +// GetFullType implements duck.Implementable +func (s *Subscribable) GetFullType() duck.Populatable { + return &Subscribable{} +} + +// Populate implements duck.Populatable +func (c *Subscribable) Populate() { + c.Spec.Subscribers = []SubscriberSpec{{ + UID: "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1", + Generation: 1, + SubscriberURI: apis.HTTP("call1"), + ReplyURI: apis.HTTP("sink2"), + }, { + UID: "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1", + Generation: 2, + SubscriberURI: apis.HTTP("call2"), + ReplyURI: apis.HTTP("sink2"), + }} + c.Status.Subscribers = // Populate ALL fields + []SubscriberStatus{{ + UID: "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1", + ObservedGeneration: 1, + Ready: corev1.ConditionTrue, + Message: "Some message", + }, { + UID: "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1", + ObservedGeneration: 2, + Ready: corev1.ConditionFalse, + Message: "Some message", + }} +} + +// GetListType implements apis.Listable +func (c *Subscribable) GetListType() runtime.Object { + return &SubscribableList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubscribableTypeList is a list of SubscribableType resources +type SubscribableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Subscribable `json:"items"` +} diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/subscribable_types_conversion.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/subscribable_types_conversion.go new file mode 100644 index 00000000..30f17c3b --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/subscribable_types_conversion.go @@ -0,0 +1,200 @@ +/* +Copyright 2020 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" + + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" +) + +// ConvertTo implements apis.Convertible +func (source *Subscribable) ConvertTo(ctx context.Context, to apis.Convertible) error { + switch sink := to.(type) { + case *eventingduckv1.Subscribable: + sink.ObjectMeta = source.ObjectMeta + if err := source.Status.ConvertTo(ctx, &sink.Status); err != nil { + return err + } + if err := source.Spec.ConvertTo(ctx, &sink.Spec); err != nil { + return err + } + default: + return fmt.Errorf("unknown version, got: %T", sink) + } + return nil +} + +// ConvertTo helps implement apis.Convertible +func (source *SubscribableSpec) ConvertTo(ctx context.Context, obj apis.Convertible) error { + switch sink := obj.(type) { + case *eventingduckv1.SubscribableSpec: + if len(source.Subscribers) > 0 { + sink.Subscribers = make([]eventingduckv1.SubscriberSpec, len(source.Subscribers)) + for i, s := range source.Subscribers { + if err := s.ConvertTo(ctx, &sink.Subscribers[i]); err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown version, got: %T", sink) + } + return nil +} + +// ConvertTo implements apis.Convertible +func (source *SubscriberSpec) ConvertTo(ctx context.Context, obj apis.Convertible) error { + switch sink := obj.(type) { + case *eventingduckv1.SubscriberSpec: + sink.UID = source.UID + sink.Generation = source.Generation + sink.SubscriberURI = source.SubscriberURI + if source.Delivery != nil { + sink.Delivery = &eventingduckv1.DeliverySpec{} + if err := source.Delivery.ConvertTo(ctx, sink.Delivery); err != nil { + return err + } + } + sink.ReplyURI = source.ReplyURI + default: + return fmt.Errorf("unknown version, got: %T", sink) + } + return nil +} + +// ConvertTo implements apis.Convertible +func (source *SubscribableStatus) ConvertTo(ctx context.Context, obj apis.Convertible) error { + switch sink := obj.(type) { + case *eventingduckv1.SubscribableStatus: + if len(source.Subscribers) > 0 { + sink.Subscribers = make([]eventingduckv1.SubscriberStatus, len(source.Subscribers)) + for i, ss := range source.Subscribers { + sink.Subscribers[i] = eventingduckv1.SubscriberStatus{} + if err := ss.ConvertTo(ctx, &sink.Subscribers[i]); err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown version, got: %T", sink) + } + return nil +} + +// ConvertTo implements apis.Convertible +func (source *SubscriberStatus) ConvertTo(ctx context.Context, obj apis.Convertible) error { + switch sink := obj.(type) { + case *eventingduckv1.SubscriberStatus: + sink.UID = source.UID + sink.ObservedGeneration = source.ObservedGeneration + sink.Ready = source.Ready + sink.Message = source.Message + default: + return fmt.Errorf("unknown version, got: %T", sink) + } + return nil +} + +// ConvertFrom implements apis.Convertible. 
+func (sink *Subscribable) ConvertFrom(ctx context.Context, from apis.Convertible) error { + switch source := from.(type) { + case *eventingduckv1.Subscribable: + sink.ObjectMeta = source.ObjectMeta + if err := sink.Status.ConvertFrom(ctx, &source.Status); err != nil { + return err + } + if err := sink.Spec.ConvertFrom(ctx, &source.Spec); err != nil { + return err + } + default: + return fmt.Errorf("unknown version, got: %T", source) + } + return nil +} + +// ConvertFrom helps implement apis.Convertible +func (sink *SubscribableSpec) ConvertFrom(ctx context.Context, obj apis.Convertible) error { + switch source := obj.(type) { + case *eventingduckv1.SubscribableSpec: + if len(source.Subscribers) > 0 { + sink.Subscribers = make([]SubscriberSpec, len(source.Subscribers)) + for i := range source.Subscribers { + if err := sink.Subscribers[i].ConvertFrom(ctx, &source.Subscribers[i]); err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown version, got: %T", source) + } + return nil +} + +// ConvertFrom helps implement apis.Convertible +func (sink *SubscriberSpec) ConvertFrom(ctx context.Context, obj apis.Convertible) error { + switch source := obj.(type) { + case *eventingduckv1.SubscriberSpec: + sink.UID = source.UID + sink.Generation = source.Generation + sink.SubscriberURI = source.SubscriberURI + sink.ReplyURI = source.ReplyURI + if source.Delivery != nil { + sink.Delivery = &DeliverySpec{} + return sink.Delivery.ConvertFrom(ctx, source.Delivery) + } + default: + return fmt.Errorf("unknown version, got: %T", source) + } + return nil +} + +// ConvertFrom implements apis.Convertible +func (sink *SubscribableStatus) ConvertFrom(ctx context.Context, obj apis.Convertible) error { + switch source := obj.(type) { + case *eventingduckv1.SubscribableStatus: + if len(source.Subscribers) > 0 { + sink.Subscribers = make([]SubscriberStatus, len(source.Subscribers)) + for i := range source.Subscribers { + sink.Subscribers[i] = SubscriberStatus{} + if err := sink.Subscribers[i].ConvertFrom(ctx, &source.Subscribers[i]); err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown version, got: %T", sink) + } + return nil +} + +// ConvertFrom implements apis.Convertible +func (sink *SubscriberStatus) ConvertFrom(ctx context.Context, obj apis.Convertible) error { + switch source := obj.(type) { + case *eventingduckv1.SubscriberStatus: + sink.UID = source.UID + sink.ObservedGeneration = source.ObservedGeneration + sink.Ready = source.Ready + sink.Message = source.Message + default: + return fmt.Errorf("unknown version, got: %T", sink) + } + return nil +} diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..fad633c8 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,349 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + apis "knative.dev/pkg/apis" + v1 "knative.dev/pkg/apis/duck/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Channelable) DeepCopyInto(out *Channelable) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Channelable. +func (in *Channelable) DeepCopy() *Channelable { + if in == nil { + return nil + } + out := new(Channelable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Channelable) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChannelableList) DeepCopyInto(out *ChannelableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Channelable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelableList. +func (in *ChannelableList) DeepCopy() *ChannelableList { + if in == nil { + return nil + } + out := new(ChannelableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ChannelableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChannelableSpec) DeepCopyInto(out *ChannelableSpec) { + *out = *in + in.SubscribableSpec.DeepCopyInto(&out.SubscribableSpec) + if in.Delivery != nil { + in, out := &in.Delivery, &out.Delivery + *out = new(DeliverySpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelableSpec. +func (in *ChannelableSpec) DeepCopy() *ChannelableSpec { + if in == nil { + return nil + } + out := new(ChannelableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChannelableStatus) DeepCopyInto(out *ChannelableStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.AddressStatus.DeepCopyInto(&out.AddressStatus) + in.SubscribableStatus.DeepCopyInto(&out.SubscribableStatus) + if in.DeadLetterChannel != nil { + in, out := &in.DeadLetterChannel, &out.DeadLetterChannel + *out = new(v1.KReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelableStatus. 
+func (in *ChannelableStatus) DeepCopy() *ChannelableStatus { + if in == nil { + return nil + } + out := new(ChannelableStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliverySpec) DeepCopyInto(out *DeliverySpec) { + *out = *in + if in.DeadLetterSink != nil { + in, out := &in.DeadLetterSink, &out.DeadLetterSink + *out = new(v1.Destination) + (*in).DeepCopyInto(*out) + } + if in.Retry != nil { + in, out := &in.Retry, &out.Retry + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.BackoffPolicy != nil { + in, out := &in.BackoffPolicy, &out.BackoffPolicy + *out = new(BackoffPolicyType) + **out = **in + } + if in.BackoffDelay != nil { + in, out := &in.BackoffDelay, &out.BackoffDelay + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliverySpec. +func (in *DeliverySpec) DeepCopy() *DeliverySpec { + if in == nil { + return nil + } + out := new(DeliverySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryStatus) DeepCopyInto(out *DeliveryStatus) { + *out = *in + if in.DeadLetterChannel != nil { + in, out := &in.DeadLetterChannel, &out.DeadLetterChannel + *out = new(v1.KReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStatus. +func (in *DeliveryStatus) DeepCopy() *DeliveryStatus { + if in == nil { + return nil + } + out := new(DeliveryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subscribable) DeepCopyInto(out *Subscribable) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscribable. +func (in *Subscribable) DeepCopy() *Subscribable { + if in == nil { + return nil + } + out := new(Subscribable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Subscribable) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscribableList) DeepCopyInto(out *SubscribableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Subscribable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscribableList. +func (in *SubscribableList) DeepCopy() *SubscribableList { + if in == nil { + return nil + } + out := new(SubscribableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SubscribableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscribableSpec) DeepCopyInto(out *SubscribableSpec) { + *out = *in + if in.Subscribers != nil { + in, out := &in.Subscribers, &out.Subscribers + *out = make([]SubscriberSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscribableSpec. +func (in *SubscribableSpec) DeepCopy() *SubscribableSpec { + if in == nil { + return nil + } + out := new(SubscribableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscribableStatus) DeepCopyInto(out *SubscribableStatus) { + *out = *in + if in.Subscribers != nil { + in, out := &in.Subscribers, &out.Subscribers + *out = make([]SubscriberStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscribableStatus. +func (in *SubscribableStatus) DeepCopy() *SubscribableStatus { + if in == nil { + return nil + } + out := new(SubscribableStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriberSpec) DeepCopyInto(out *SubscriberSpec) { + *out = *in + if in.SubscriberURI != nil { + in, out := &in.SubscriberURI, &out.SubscriberURI + *out = new(apis.URL) + (*in).DeepCopyInto(*out) + } + if in.ReplyURI != nil { + in, out := &in.ReplyURI, &out.ReplyURI + *out = new(apis.URL) + (*in).DeepCopyInto(*out) + } + if in.Delivery != nil { + in, out := &in.Delivery, &out.Delivery + *out = new(DeliverySpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriberSpec. +func (in *SubscriberSpec) DeepCopy() *SubscriberSpec { + if in == nil { + return nil + } + out := new(SubscriberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriberStatus) DeepCopyInto(out *SubscriberStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriberStatus. +func (in *SubscriberStatus) DeepCopy() *SubscriberStatus { + if in == nil { + return nil + } + out := new(SubscriberStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/knative.dev/eventing/pkg/client/injection/informers/eventing/v1/trigger/trigger.go b/vendor/knative.dev/eventing/pkg/client/injection/informers/eventing/v1/trigger/trigger.go new file mode 100644 index 00000000..2435cda2 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/injection/informers/eventing/v1/trigger/trigger.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package trigger + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1" + factory "knative.dev/eventing/pkg/client/injection/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Eventing().V1().Triggers() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.TriggerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1.TriggerInformer from context.") + } + return untyped.(v1.TriggerInformer) +} diff --git a/vendor/knative.dev/eventing/test/lib/resources/constants.go b/vendor/knative.dev/eventing/test/lib/resources/constants.go new file mode 100644 index 00000000..a77a9186 --- /dev/null +++ b/vendor/knative.dev/eventing/test/lib/resources/constants.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// API versions for the resources. +const ( + CoreAPIVersion = "v1" + EventingAPIVersion = "eventing.knative.dev/v1" + MessagingAPIVersion = "messaging.knative.dev/v1" + FlowsAPIVersion = "flows.knative.dev/v1" + ServingAPIVersion = "serving.knative.dev/v1" + SourcesV1A2APIVersion = "sources.knative.dev/v1alpha2" + SourcesV1B1APIVersion = "sources.knative.dev/v1beta1" + SourcesV1APIVersion = "sources.knative.dev/v1" +) + +// Kind for Knative resources. +const ( + KServiceKind string = "Service" +) + +var ( + // KServicesGVR is GroupVersionResource for Knative Service + KServicesGVR = schema.GroupVersionResource{ + Group: "serving.knative.dev", + Version: "v1", + Resource: "services", + } + // KServiceType is type of Knative Service + KServiceType = metav1.TypeMeta{ + Kind: "Service", + APIVersion: KServicesGVR.GroupVersion().String(), + } +) + +// Kind for core Kubernetes resources. +const ( + ServiceKind string = "Service" +) + +// Kind for eventing resources. 
+const ( + SubscriptionKind string = "Subscription" + + BrokerKind string = "Broker" + TriggerKind string = "Trigger" +) + +// Kind for messaging resources. +const ( + InMemoryChannelKind string = "InMemoryChannel" + + ChannelKind string = "Channel" + SequenceKind string = "Sequence" + ParallelKind string = "Parallel" +) + +// Kind for flows resources. +const ( + FlowsSequenceKind string = "Sequence" + FlowsParallelKind string = "Parallel" +) + +// Kind for sources resources that exist in Eventing core +const ( + ApiServerSourceKind string = "ApiServerSource" + PingSourceKind string = "PingSource" +) diff --git a/vendor/knative.dev/eventing/test/lib/resources/eventing.go b/vendor/knative.dev/eventing/test/lib/resources/eventing.go new file mode 100644 index 00000000..05af1b36 --- /dev/null +++ b/vendor/knative.dev/eventing/test/lib/resources/eventing.go @@ -0,0 +1,324 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +// This file contains functions that construct Eventing resources. + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" + eventingduckv1beta1 "knative.dev/eventing/pkg/apis/duck/v1beta1" + eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" + messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +// BrokerOption enables further configuration of a v1 Broker. +type BrokerOption func(*eventingv1.Broker) + +// TriggerOption enables further configuration of a v1 Trigger. +type TriggerOption func(*eventingv1.Trigger) + +// SubscriptionOption enables further configuration of a v1 Subscription. +type SubscriptionOption func(*messagingv1.Subscription) + +// DeliveryOption enables further configuration of DeliverySpec. +type DeliveryOption func(*eventingduckv1beta1.DeliverySpec) + +// channelRef returns an ObjectReference for a given Channel name. +func channelRef(name string, typemeta *metav1.TypeMeta) duckv1.KReference { + return duckv1.KReference{ + Kind: typemeta.Kind, + APIVersion: typemeta.APIVersion, + Name: name, + } +} + +func KnativeRefForService(name, namespace string) *duckv1.KReference { + return &duckv1.KReference{ + Kind: "Service", + APIVersion: "v1", + Name: name, + Namespace: namespace, + } +} + +func KnativeRefForBroker(name, namespace string) *duckv1.KReference { + return &duckv1.KReference{ + Kind: "Broker", + APIVersion: "eventing.knative.dev/v1", + Name: name, + Namespace: namespace, + } +} + +// WithSubscriberForSubscription returns an option that adds a Subscriber for the given +// v1 Subscription. +func WithSubscriberForSubscription(name string) SubscriptionOption { + return func(s *messagingv1.Subscription) { + if name != "" { + s.Spec.Subscriber = &duckv1.Destination{ + Ref: KnativeRefForService(name, ""), + } + } + } +} + +// WithReplyForSubscription returns an options that adds a ReplyStrategy for the given v1 Subscription. 
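+// Illustrative usage (a minimal sketch; the channel, subscriber, and reply
+// names below, and the channelTypeMeta variable, are assumed examples rather
+// than values defined in this package):
+//
+//	sub := Subscription("test-sub", "test-channel", channelTypeMeta,
+//		WithSubscriberForSubscription("subscriber-svc"),
+//		WithReplyForSubscription("reply-channel", channelTypeMeta))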
+func WithReplyForSubscription(name string, typemeta *metav1.TypeMeta) SubscriptionOption { + return func(s *messagingv1.Subscription) { + if name != "" { + s.Spec.Reply = &duckv1.Destination{ + Ref: &duckv1.KReference{ + Kind: typemeta.Kind, + APIVersion: typemeta.APIVersion, + Name: name, + Namespace: s.Namespace}, + } + } + } +} + +// WithDeadLetterSinkForSubscription returns an options that adds a DeadLetterSink for the given v1 Subscription. +func WithDeadLetterSinkForSubscription(name string) SubscriptionOption { + return func(s *messagingv1.Subscription) { + if name != "" { + delivery := s.Spec.Delivery + if delivery == nil { + delivery = &eventingduckv1.DeliverySpec{} + s.Spec.Delivery = delivery + } + + delivery.DeadLetterSink = &duckv1.Destination{ + Ref: KnativeRefForService(name, s.Namespace), + } + + } + } +} + +// SubscriptionV1 returns a v1 Subscription. +func Subscription( + name, channelName string, + channelTypeMeta *metav1.TypeMeta, + options ...SubscriptionOption, +) *messagingv1.Subscription { + subscription := &messagingv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: messagingv1.SubscriptionSpec{ + Channel: channelRef(channelName, channelTypeMeta), + }, + } + for _, option := range options { + option(subscription) + } + return subscription +} + +func WithConfigForBroker(config *duckv1.KReference) BrokerOption { + return func(b *eventingv1.Broker) { + b.Spec.Config = config + } +} + +// WithConfigMapForBrokerConfig returns a function that configures the ConfigMap +// for the Spec.Config for a given Broker. Note that the CM must exist and has +// to be in the same namespace as the Broker and have the same Name. Typically +// you'd do this by calling client.CreateBrokerConfigMapOrFail and then call this +// method. +// If those don't apply to your ConfigMap, look at WithConfigForBrokerV1Beta1 +func WithConfigMapForBrokerConfig() BrokerOption { + return func(b *eventingv1.Broker) { + b.Spec.Config = &duckv1.KReference{ + Name: b.Name, + Namespace: b.Namespace, + Kind: "ConfigMap", + APIVersion: "v1", + } + } +} + +// WithBrokerClassForBroker returns a function that adds a brokerClass +// annotation to the given v1 Broker. +func WithBrokerClassForBroker(brokerClass string) BrokerOption { + return func(b *eventingv1.Broker) { + annotations := b.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string, 1) + } + annotations["eventing.knative.dev/broker.class"] = brokerClass + b.SetAnnotations(annotations) + } +} + +// WithCustomAnnotationForBroker returns a function that adds a custom +// annotation to the given v1 Broker. +func WithCustomAnnotationForBroker(annotationKey, annotationValue string) BrokerOption { + return func(b *eventingv1.Broker) { + annotations := b.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string, 1) + } + annotations[annotationKey] = annotationValue + b.SetAnnotations(annotations) + } +} + +// WithDeliveryForBroker returns a function that adds a Delivery for the given +// v1 Broker. +func WithDeliveryForBroker(delivery *eventingduckv1.DeliverySpec) BrokerOption { + return func(b *eventingv1.Broker) { + b.Spec.Delivery = delivery + } +} + +// ConfigMap returns a ConfigMap. +func ConfigMap(name, namespace string, data map[string]string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + // Default label for a configmap being eligible to be propagated. 
+ "knative.dev/config-propagation": "original", + }, + }, + Data: data, + } +} + +// Broker returns a v1 Broker. +func Broker(name string, options ...BrokerOption) *eventingv1.Broker { + broker := &eventingv1.Broker{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + for _, option := range options { + option(broker) + } + return broker +} + +// WithAttributesTriggerFilter returns an option that adds a TriggerFilter with Attributes for the given Trigger. +func WithAttributesTriggerFilter(eventSource, eventType string, extensions map[string]interface{}) TriggerOption { + attrs := make(map[string]string) + if eventType != "" { + attrs["type"] = eventType + } else { + attrs["type"] = eventingv1.TriggerAnyFilter + } + if eventSource != "" { + attrs["source"] = eventSource + } else { + attrs["source"] = eventingv1.TriggerAnyFilter + } + for k, v := range extensions { + attrs[k] = fmt.Sprintf("%v", v) + } + return func(t *eventingv1.Trigger) { + t.Spec.Filter = &eventingv1.TriggerFilter{ + Attributes: eventingv1.TriggerFilterAttributes(attrs), + } + } +} + +// WithDependencyAnnotationTrigger returns an option that adds a dependency annotation to the given Trigger. +func WithDependencyAnnotationTrigger(dependencyAnnotation string) TriggerOption { + return func(t *eventingv1.Trigger) { + if t.Annotations == nil { + t.Annotations = make(map[string]string) + } + t.Annotations[eventingv1.DependencyAnnotation] = dependencyAnnotation + } +} + +// WithSubscriberServiceRefForTrigger returns an option that adds a Subscriber Knative Service Ref for the given v1 Trigger. +func WithSubscriberServiceRefForTrigger(name string) TriggerOption { + return WithSubscriberDestination(func(t *eventingv1.Trigger) duckv1.Destination { + return duckv1.Destination{ + Ref: KnativeRefForService(name, t.Namespace), + } + }) +} + +// WithSubscriberURIForTrigger returns an option that adds a Subscriber URI for the given v1 Trigger. +func WithSubscriberURIForTrigger(uri string) TriggerOption { + return WithSubscriberDestination(func(t *eventingv1.Trigger) duckv1.Destination { + apisURI, _ := apis.ParseURL(uri) + return duckv1.Destination{ + URI: apisURI, + } + }) +} + +// WithSubscriberDestination returns an option that adds a Subscriber for given +// duckv1.Destination. +func WithSubscriberDestination(destFactory func(t *eventingv1.Trigger) duckv1.Destination) TriggerOption { + return func(t *eventingv1.Trigger) { + dest := destFactory(t) + if dest.Ref != nil || dest.URI != nil { + t.Spec.Subscriber = dest + } + } +} + +// WithBroker returns an option that adds a broker for the given Trigger. +func WithBroker(name string) TriggerOption { + return func(trigger *eventingv1.Trigger) { + trigger.Spec.Broker = name + } +} + +// Trigger returns a v1 Trigger. +func Trigger(name string, options ...TriggerOption) *eventingv1.Trigger { + trigger := &eventingv1.Trigger{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + for _, option := range options { + option(trigger) + } + return trigger +} + +// WithDeadLetterSinkForDelivery returns an options that adds a DeadLetterSink for the given DeliverySpec. +func WithDeadLetterSinkForDelivery(name string) DeliveryOption { + return func(delivery *eventingduckv1beta1.DeliverySpec) { + if name != "" { + delivery.DeadLetterSink = &duckv1.Destination{ + Ref: KnativeRefForService(name, ""), + } + } + } +} + +// Delivery returns a DeliverySpec. 
+func Delivery(options ...DeliveryOption) *eventingduckv1beta1.DeliverySpec { + delivery := &eventingduckv1beta1.DeliverySpec{} + for _, option := range options { + option(delivery) + } + return delivery +} diff --git a/vendor/knative.dev/eventing/test/lib/resources/kube.go b/vendor/knative.dev/eventing/test/lib/resources/kube.go new file mode 100644 index 00000000..a0f68094 --- /dev/null +++ b/vendor/knative.dev/eventing/test/lib/resources/kube.go @@ -0,0 +1,304 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +// This file contains functions that construct common Kubernetes resources. + +import ( + "fmt" + + v1 "knative.dev/pkg/apis/duck/v1" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + pkgTest "knative.dev/pkg/test" +) + +// PodOption enables further configuration of a Pod. +type PodOption func(*corev1.Pod) + +// Option enables further configuration of a Role. +type RoleOption func(*rbacv1.Role) + +// HelloWorldPod creates a Pod that logs "Hello, World!". +func HelloWorldPod(name string, options ...PodOption) *corev1.Pod { + const imageName = "print" + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: imageName, + Image: pkgTest.ImagePath(imageName), + ImagePullPolicy: corev1.PullIfNotPresent, + }}, + RestartPolicy: corev1.RestartPolicyAlways, + }, + } + for _, option := range options { + option(pod) + } + return pod +} + +// WithLabelsForPod returns an option setting the pod labels +func WithLabelsForPod(labels map[string]string) PodOption { + return func(p *corev1.Pod) { + p.Labels = labels + } +} + +const ( + PerfConsumerService = "perf-consumer" + PerfAggregatorService = "perf-aggregator" + PerfServiceAccount = "perf-eventing" +) + +func PerformanceConsumerService() *corev1.Service { + return Service( + PerfConsumerService, + map[string]string{"role": "perf-consumer"}, + []corev1.ServicePort{{ + Protocol: corev1.ProtocolTCP, + Port: 80, + TargetPort: intstr.FromString("cloudevents"), + Name: "http", + }}, + ) +} + +func PerformanceAggregatorService() *corev1.Service { + return Service( + PerfAggregatorService, + map[string]string{"role": "perf-aggregator"}, + []corev1.ServicePort{{ + Protocol: corev1.ProtocolTCP, + Port: 10000, + TargetPort: intstr.FromString("grpc"), + Name: "grpc", + }}, + ) +} + +func PerformanceImageReceiverPod(imageName string, pace string, warmup string, aggregatorHostname string, additionalArgs ...string) *corev1.Pod { + const podName = "perf-receiver" + + args := append([]string{ + "--roles=receiver", + fmt.Sprintf("--pace=%s", pace), + fmt.Sprintf("--warmup=%s", warmup), + fmt.Sprintf("--aggregator=%s:10000", aggregatorHostname), + }, additionalArgs...) 
+ + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Labels: map[string]string{ + "role": "perf-consumer", + }, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: PerfServiceAccount, + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{{ + Name: "receiver", + Image: pkgTest.ImagePath(imageName), + Args: args, + Env: []corev1.EnvVar{{ + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }}, + Ports: []corev1.ContainerPort{{ + Name: "cloudevents", + ContainerPort: 8080, + }}, + }}, + }, + } +} + +func PerformanceImageAggregatorPod(expectedRecords int, publish bool, additionalArgs ...string) *corev1.Pod { + const podName = "perf-aggregator" + const imageName = "performance" + + args := append([]string{ + "--roles=aggregator", + fmt.Sprintf("--publish=%v", publish), + fmt.Sprintf("--expect-records=%d", expectedRecords), + }, additionalArgs...) + + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Labels: map[string]string{ + "role": "perf-aggregator", + }, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: PerfServiceAccount, + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{{ + Name: "aggregator", + Image: pkgTest.ImagePath(imageName), + Args: args, + Env: []corev1.EnvVar{{ + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }}, + TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, + Ports: []corev1.ContainerPort{{ + Name: "grpc", + ContainerPort: 10000, + }}, + }}, + }, + } +} + +// Service creates a Kubernetes Service with the given name, selector and ports +func Service(name string, selector map[string]string, ports []corev1.ServicePort) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: corev1.ServiceSpec{ + Selector: selector, + Ports: ports, + }, + } +} + +// Service creates a Kubernetes Service with the given name, namespace, and +// selector. Port 8080 is set as the target port. +func ServiceDefaultHTTP(name string, selector map[string]string) *corev1.Service { + return Service(name, selector, []corev1.ServicePort{{ + Name: "http", + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(8080), + }}) +} + +// ServiceRef returns a Service ObjectReference for a given Service name. +func ServiceRef(name string) *corev1.ObjectReference { + return pkgTest.CoreV1ObjectReference(ServiceKind, CoreAPIVersion, name) +} + +// ServiceKRef returns a Service ObjectReference for a given Service name. +func ServiceKRef(name string) *v1.KReference { + ref := pkgTest.CoreV1ObjectReference(ServiceKind, CoreAPIVersion, name) + return &v1.KReference{ + Kind: ref.Kind, + Namespace: ref.Namespace, + Name: ref.Name, + APIVersion: ref.APIVersion, + } +} + +// ServiceAccount creates a Kubernetes ServiceAccount with the given name and namespace. +func ServiceAccount(name, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } +} + +// RoleBinding creates a Kubernetes RoleBinding with the given ServiceAccount name and +// namespace, Role or ClusterRole Kind, name, RoleBinding name and namespace. 
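+// Illustrative usage (a minimal sketch; all names are assumed examples):
+//
+//	rb := RoleBinding("event-watcher-sa", "test-ns", "Role", "event-watcher", "event-watcher-rb", "test-ns")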
+func RoleBinding(saName, saNamespace, rKind, rName, rbName, rbNamespace string) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: rbName, + Namespace: rbNamespace, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: saName, + Namespace: saNamespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: rKind, + Name: rName, + APIGroup: rbacv1.SchemeGroupVersion.Group, + }, + } +} + +// ClusterRoleBinding creates a Kubernetes ClusterRoleBinding with the given ServiceAccount name and +// namespace, ClusterRole name, ClusterRoleBinding name. +func ClusterRoleBinding(saName, saNamespace, crName, crbName string) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: crbName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: saName, + Namespace: saNamespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: crName, + APIGroup: rbacv1.SchemeGroupVersion.Group, + }, + } +} + +// EventWatcherRole creates a Kubernetes Role +func Role(rName string, options ...RoleOption) *rbacv1.Role { + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: rName, + }, + Rules: []rbacv1.PolicyRule{}, + } + for _, option := range options { + option(role) + } + return role +} + +// WithRuleForRole is a Role Option for adding a rule +func WithRuleForRole(rule *rbacv1.PolicyRule) RoleOption { + return func(r *rbacv1.Role) { + r.Rules = append(r.Rules, *rule) + } +} diff --git a/vendor/knative.dev/eventing/test/lib/resources/meta.go b/vendor/knative.dev/eventing/test/lib/resources/meta.go new file mode 100644 index 00000000..6059612f --- /dev/null +++ b/vendor/knative.dev/eventing/test/lib/resources/meta.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MetaResource includes necessary meta data to retrieve the generic Kubernetes resource. +type MetaResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` +} + +// MetaResourceList includes necessary meta data to retrieve the generic Kubernetes resource list. +type MetaResourceList struct { + metav1.TypeMeta `json:",inline"` + Namespace string +} + +// NewMetaResource returns a MetaResource built from the given name, namespace and typemeta. +func NewMetaResource(name, namespace string, typemeta *metav1.TypeMeta) *MetaResource { + return &MetaResource{ + TypeMeta: *typemeta, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } +} + +// NewMetaResourceList returns a MetaResourceList built from the given namespace and typemeta. 
+func NewMetaResourceList(namespace string, typemeta *metav1.TypeMeta) *MetaResourceList { + return &MetaResourceList{ + TypeMeta: *typemeta, + Namespace: namespace, + } +} diff --git a/vendor/knative.dev/eventing/test/lib/resources/serving.go b/vendor/knative.dev/eventing/test/lib/resources/serving.go new file mode 100644 index 00000000..cfa516e4 --- /dev/null +++ b/vendor/knative.dev/eventing/test/lib/resources/serving.go @@ -0,0 +1,138 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +// This file contains functions that construct Serving resources. + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" + duckv1 "knative.dev/pkg/apis/duck/v1" + pkgTest "knative.dev/pkg/test" +) + +// ServingClient holds clients required to get serving resources +type ServingClient struct { + Kube kubernetes.Interface + Dynamic dynamic.Interface +} + +// KServiceRoute represents ksvc route, so how much traffic is routed to given deployment +type KServiceRoute struct { + TrafficPercent uint8 + DeploymentName string +} + +// WithSubscriberKServiceRefForTrigger returns an option that adds a Subscriber +// Knative Service Ref for the given Trigger. +func WithSubscriberKServiceRefForTrigger(name string) TriggerOption { + return func(t *eventingv1.Trigger) { + if name != "" { + t.Spec.Subscriber = duckv1.Destination{ + Ref: KnativeRefForKservice(name, t.Namespace), + } + } + } +} + +// KnativeRefForKservice return a duck reference for Knative Service +func KnativeRefForKservice(name, namespace string) *duckv1.KReference { + return &duckv1.KReference{ + Kind: KServiceKind, + APIVersion: ServingAPIVersion, + Name: name, + Namespace: namespace, + } +} + +// KServiceRef returns a Knative Service ObjectReference for a given Service name. +func KServiceRef(name string) *corev1.ObjectReference { + return pkgTest.CoreV1ObjectReference(KServiceKind, ServingAPIVersion, name) +} + +// KServiceRoutes gets routes of given ksvc. +// If ksvc isn't ready yet second return value will be false. +func KServiceRoutes(client ServingClient, name, namespace string) ([]KServiceRoute, bool, error) { + serving := client.Dynamic.Resource(KServicesGVR).Namespace(namespace) + unstruct, err := serving.Get(context.Background(), name, metav1.GetOptions{}) + if k8serrors.IsNotFound(err) { + // Return false as we are not done yet. + // We swallow the error to keep on polling. + // It should only happen if we wait for the auto-created resources, like default Broker. + return nil, false, nil + } else if err != nil { + // Return error to stop the polling. 
+ return nil, false, err + } + + routes, ready := ksvcRoutes(unstruct) + return routes, ready, nil +} + +// KServiceDeploymentName returns a name of deployment of Knative Service that +// receives 100% of traffic. +// If ksvc isn't ready yet second return value will be false. +func KServiceDeploymentName(client ServingClient, name, namespace string) (string, bool, error) { + routes, ready, err := KServiceRoutes(client, name, namespace) + if ready { + if len(routes) > 1 { + return "", false, fmt.Errorf("traffic shouldn't be split to more then 1 revision: %v", routes) + } + r := routes[0] + return r.DeploymentName, true, nil + } + + return "", ready, err +} + +func ksvcRoutes(un *unstructured.Unstructured) ([]KServiceRoute, bool) { + routes := make([]KServiceRoute, 0) + content := un.UnstructuredContent() + maybeStatus, ok := content["status"] + if !ok { + return routes, false + } + status := maybeStatus.(map[string]interface{}) + maybeTraffic, ok := status["traffic"] + if !ok { + return routes, false + } + traffic := maybeTraffic.([]interface{}) + if len(traffic) == 0 { + // continue to wait + return routes, false + } + for _, uRoute := range traffic { + route := uRoute.(map[string]interface{}) + revisionName := route["revisionName"].(string) + percent := uint8(route["percent"].(int64)) + deploymentName := fmt.Sprintf("%s-deployment", revisionName) + routes = append(routes, KServiceRoute{ + TrafficPercent: percent, + DeploymentName: deploymentName, + }) + } + return routes, true +} diff --git a/vendor/knative.dev/hack/OWNERS b/vendor/knative.dev/hack/OWNERS new file mode 100644 index 00000000..4d20bf8c --- /dev/null +++ b/vendor/knative.dev/hack/OWNERS @@ -0,0 +1,8 @@ +approvers: + - technical-oversight-committee + - productivity-writers + - knative-release-leads + +reviewers: + - productivity-writers + - productivity-reviewers diff --git a/vendor/knative.dev/hack/OWNERS_ALIASES b/vendor/knative.dev/hack/OWNERS_ALIASES new file mode 100644 index 00000000..808fdf52 --- /dev/null +++ b/vendor/knative.dev/hack/OWNERS_ALIASES @@ -0,0 +1,183 @@ +# This file is auto-generated from peribolos. 
+# Do not modify this file, instead modify peribolos/knative.yaml + +aliases: + client-reviewers: + - itsmurugappan + client-wg-leads: + - dsimansk + - navidshaikh + - rhuss + client-writers: + - dsimansk + - maximilien + - navidshaikh + - rhuss + - vyasgun + conformance-task-force-leads: + - salaboy + conformance-writers: + - salaboy + docs-reviewers: + - nainaz + - nak3 + - pmbanugo + - retocode + - skonto + - snneji + docs-wg-leads: + - snneji + docs-writers: + - csantanapr + - nak3 + - retocode + - skonto + - snneji + eventing-reviewers: + - Leo6Leo + - aslom + - cali0707 + - creydr + eventing-triage: + - lberk + eventing-wg-leads: + - pierDipi + eventing-writers: + - aliok + - creydr + - lberk + - lionelvillard + - matzew + - odacremolbap + - pierDipi + func-reviewers: + - gauron99 + - jrangelramos + - nainaz + func-writers: + - jrangelramos + - lance + - lkingland + - matejvasek + - salaboy + functions-wg-leads: + - lkingland + - salaboy + knative-admin: + - Cali0707 + - ReToCode + - creydr + - csantanapr + - davidhadas + - dprotaso + - dsimansk + - knative-automation + - knative-prow-releaser-robot + - knative-prow-robot + - knative-prow-updater-robot + - knative-test-reporter-robot + - kvmware + - lance + - mchmarny + - nainaz + - pierDipi + - psschwei + - puerco + - salaboy + - skonto + - smoser-ibm + - upodroid + - xtreme-sameer-vohra + knative-release-leads: + - Cali0707 + - ReToCode + - creydr + - dsimansk + - pierDipi + - skonto + knative-robots: + - knative-automation + - knative-prow-releaser-robot + - knative-prow-robot + - knative-prow-updater-robot + - knative-test-reporter-robot + operations-reviewers: + - aliok + - houshengbo + - matzew + - maximilien + operations-wg-leads: + - houshengbo + operations-writers: + - aliok + - houshengbo + - matzew + - maximilien + productivity-leads: + - kvmware + - upodroid + productivity-reviewers: + - evankanderson + - mgencur + productivity-wg-leads: + - kvmware + - upodroid + productivity-writers: + - cardil + - kvmware + - upodroid + security-wg-leads: + - davidhadas + - evankanderson + security-writers: + - davidhadas + - evankanderson + serving-approvers: + - nak3 + - psschwei + - skonto + serving-reviewers: + - KauzClay + - jsanin-vmw + - kauana + - kvmware + - retocode + - skonto + - xtreme-vikram-yadav + serving-triage: + - KauzClay + - retocode + - skonto + serving-wg-leads: + - dprotaso + serving-writers: + - dprotaso + - nak3 + - psschwei + - skonto + steering-committee: + - csantanapr + - lance + - nainaz + - puerco + - salaboy + technical-oversight-committee: + - davidhadas + - dprotaso + - dsimansk + - kvmware + - psschwei + trademark-committee: + - mchmarny + - smoser-ibm + - xtreme-sameer-vohra + ux-wg-leads: + - cali0707 + - leo6leo + - mmejia02 + - zainabhusain227 + ux-writers: + - cali0707 + - leo6leo + - mmejia02 + - zainabhusain227 diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS new file mode 100644 index 00000000..13014203 --- /dev/null +++ b/vendor/knative.dev/pkg/apis/OWNERS @@ -0,0 +1,15 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- technical-oversight-committee +- serving-wg-leads +- eventing-wg-leads + +reviewers: +- serving-writers +- eventing-writers +- eventing-reviewers +- serving-reviewers + +options: + no_parent_owners: true diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS new file mode 100644 index 00000000..af1eb05d --- /dev/null +++ b/vendor/knative.dev/pkg/apis/duck/OWNERS @@ -0,0 +1,8 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- eventing-wg-leads + +reviewers: +- eventing-reviewers +- eventing-writers diff --git a/vendor/knative.dev/pkg/controller/OWNERS b/vendor/knative.dev/pkg/controller/OWNERS new file mode 100644 index 00000000..64660c9e --- /dev/null +++ b/vendor/knative.dev/pkg/controller/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-writers + +reviewers: +- serving-reviewers diff --git a/vendor/knative.dev/pkg/hack/OWNERS b/vendor/knative.dev/pkg/hack/OWNERS new file mode 100644 index 00000000..65aa9e7b --- /dev/null +++ b/vendor/knative.dev/pkg/hack/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- productivity-writers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/knative.dev/pkg/reconciler/OWNERS b/vendor/knative.dev/pkg/reconciler/OWNERS new file mode 100644 index 00000000..136197a3 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-writers + +reviewers: +- serving-writers diff --git a/vendor/knative.dev/pkg/resolver/OWNERS b/vendor/knative.dev/pkg/resolver/OWNERS new file mode 100644 index 00000000..b5e9581f --- /dev/null +++ b/vendor/knative.dev/pkg/resolver/OWNERS @@ -0,0 +1,8 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- eventing-writers + +reviewers: +- eventing-reviewers + diff --git a/vendor/knative.dev/pkg/test/OWNERS b/vendor/knative.dev/pkg/test/OWNERS new file mode 100644 index 00000000..65aa9e7b --- /dev/null +++ b/vendor/knative.dev/pkg/test/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- productivity-writers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/knative.dev/pkg/test/README.md b/vendor/knative.dev/pkg/test/README.md new file mode 100644 index 00000000..d54c9ae2 --- /dev/null +++ b/vendor/knative.dev/pkg/test/README.md @@ -0,0 +1,218 @@ +# Test + +This directory contains tests and testing docs. + +- [Test library](#test-library) contains code you can use in your `knative` + tests +- [Flags](#flags) added by [the test library](#test-library) +- [Unit tests](#running-unit-tests) currently reside in the codebase alongside + the code they test + +## Running unit tests + +To run all unit tests: + +```bash +go test ./... +``` + +## Test library + +You can use the test library in this dir to: + +- [Use common test flags](#use-common-test-flags) +- [Output logs](#output-logs) +- [Ensure test cleanup](#ensure-test-cleanup) + +### Use common test flags + +These flags are useful for running against an existing cluster, making use of +your existing +[environment setup](https://github.com/knative/serving/blob/main/DEVELOPMENT.md#environment-setup). 
+ +By importing `knative.dev/pkg/test` you get access to a global variable called +`test.Flags` which holds the values of +[the command line flags](/test/README.md#flags). + +```go +logger.Infof("Using namespace %s", test.Flags.Namespace) +``` + +_See [e2e_flags.go](./e2e_flags.go)._ + +### Output logs + +[When tests are run with `--logverbose` option](README.md#output-verbose-logs), +debug logs will be emitted to stdout. + +We are using a generic +[FormatLogger](https://github.com/knative/pkg/blob/main/test/logging/logging.go#L49) +that can be passed in any existing logger that satisfies it. Test can use the +generic [logging methods](https://golang.org/pkg/testing/#T) to log info and +error logs. All the common methods accept generic FormatLogger as a parameter +and tests can pass in `t.Logf` like this: + +```go +_, err = pkgTest.WaitForEndpointState( + kubeClient, + t.Logf, + ...), +``` + +_See [logging.go](./logging/logging.go)._ + +### Check Knative Serving resources + +_WARNING: this code also exists in +[`knative/serving`](https://github.com/knative/serving/blob/main/test/adding_tests.md#make-requests-against-deployed-services)._ + +After creating Knative Serving resources or making changes to them, you will +need to wait for the system to realize those changes. You can use the Knative +Serving CRD check and polling methods to check the resources are either in or +reach the desired state. + +The `WaitFor*` functions use the kubernetes +[`wait` package](https://godoc.org/k8s.io/apimachinery/pkg/util/wait). To poll +they use +[`PollImmediate`](https://godoc.org/k8s.io/apimachinery/pkg/util/wait#PollImmediate) +and the return values of the function you provide behave the same as +[`ConditionFunc`](https://godoc.org/k8s.io/apimachinery/pkg/util/wait#ConditionFunc): +a `bool` to indicate if the function should stop or continue polling, and an +`error` to indicate if there has been an error. + +For example, you can poll a `Configuration` object to find the name of the +`Revision` that was created for it: + +```go +var revisionName string +err := test.WaitForConfigurationState( + clients.ServingClient, configName, func(c *v1alpha1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != "" { + revisionName = c.Status.LatestCreatedRevisionName + return true, nil + } + return false, nil + }, "ConfigurationUpdatedWithRevision") +``` + +_See [kube_checks.go](./kube_checks.go)._ + +### Ensure test cleanup + +To ensure your test is cleaned up, you should defer cleanup to execute after +your test completes and also ensure the cleanup occurs if the test is +interrupted: + +```go +defer tearDown(clients) +test.CleanupOnInterrupt(func() { tearDown(clients) }) +``` + +_See [cleanup.go](./cleanup.go)._ + +## Flags + +Importing [the test library](#test-library) adds flags that are useful for end +to end tests that need to run against a cluster. + +Tests importing [`knative.dev/pkg/test`](#test-library) recognize these flags: + +- [`--kubeconfig`](#specifying-kubeconfig) +- [`--cluster`](#specifying-cluster) +- [`--namespace`](#specifying-namespace) +- [`--logverbose`](#output-verbose-logs) +- [`--ingressendpoint`](#specifying-ingress-endpoint) +- [`--dockerrepo`](#specifying-docker-repo) +- [`--tag`](#specifying-tag) +- [`--imagetemplate`](#specifying-image-template) + +### Specifying kubeconfig + +By default the tests will use the +[kubeconfig file](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) +at `~/.kube/config`. 
If there is an error getting the current user, it will use +`kubeconfig` instead as the default value. You can specify a different config +file with the argument `--kubeconfig`. + +To run tests with a non-default kubeconfig file: + +```bash +go test ./test --kubeconfig /my/path/kubeconfig +``` + +### Specifying cluster + +The `--cluster` argument lets you use a different cluster than +[your specified kubeconfig's](#specifying-kubeconfig) active context. + +```bash +go test ./test --cluster your-cluster-name +``` + +The current cluster names can be obtained by running: + +```bash +kubectl config get-clusters +``` + +### Specifying ingress endpoint + +The `--ingressendpoint` argument lets you specify a static url to use as the +ingress server during tests. This is useful for Kubernetes configurations which +do not provide external IPs. + +```bash +go test ./test --ingressendpoint :32380 +``` + +### Specifying namespace + +The `--namespace` argument lets you specify the namespace to use for the tests. +By default, tests will use `serving-tests`. + +```bash +go test ./test --namespace your-namespace-name +``` + +### Output verbose logs + +The `--logverbose` argument lets you see verbose test logs and k8s logs. + +```bash +go test ./test --logverbose +``` + +### Specifying docker repo + +The `--dockerrepo` argument lets you specify a uri of the docker repo where you +have uploaded the test image to using `uploadtestimage.sh`. Defaults to +`$KO_DOCKER_REPO` + +```bash +go test ./test --dockerrepo myspecialdockerrepo +``` + +### Specifying tag + +The `--tag` argument lets you specify the version tag for the test images. + +```bash +go test ./test --tag v1.0 +``` + +### Specifying image template + +The `--imagetemplate` argument lets you specify a template to generate the +reference to an image from the test. Defaults to +`{{.Repository}}/{{.Name}}:{{.Tag}}` + +```bash +go test ./test --imagetemplate {{.Repository}}/{{.Name}}:{{.Tag}} +``` + +--- + +Except as otherwise noted, the content of this page is licensed under the +[Creative Commons Attribution 4.0 License](https://creativecommons.org/licenses/by/4.0/), +and code samples are licensed under the +[Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0). diff --git a/vendor/knative.dev/pkg/test/cleanup.go b/vendor/knative.dev/pkg/test/cleanup.go new file mode 100644 index 00000000..7fc91bc5 --- /dev/null +++ b/vendor/knative.dev/pkg/test/cleanup.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "os" + "os/signal" + "sync" +) + +type logFunc func(template string, args ...interface{}) + +var cleanup struct { + once sync.Once + mutex sync.RWMutex + funcs []func() +} + +func waitForInterrupt() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + + go func() { + <-c + + cleanup.mutex.RLock() + defer cleanup.mutex.RUnlock() + + for i := len(cleanup.funcs) - 1; i >= 0; i-- { + cleanup.funcs[i]() + } + + os.Exit(1) + }() +} + +// CleanupOnInterrupt will execute the function if an interrupt signal is caught +// Deprecated - use OnInterrupt +func CleanupOnInterrupt(f func(), log logFunc) { + OnInterrupt(f) +} + +// OnInterrupt registers a cleanup function to run if an interrupt signal is caught +func OnInterrupt(cleanupFunc func()) { + cleanup.once.Do(waitForInterrupt) + + cleanup.mutex.Lock() + defer cleanup.mutex.Unlock() + + cleanup.funcs = append(cleanup.funcs, cleanupFunc) +} diff --git a/vendor/knative.dev/pkg/test/clients.go b/vendor/knative.dev/pkg/test/clients.go new file mode 100644 index 00000000..88530c36 --- /dev/null +++ b/vendor/knative.dev/pkg/test/clients.go @@ -0,0 +1,98 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains an object which encapsulates k8s clients which are useful for e2e tests. + +package test + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/spoof" +) + +// NewSpoofingClient returns a spoofing client to make requests +func NewSpoofingClient(ctx context.Context, client kubernetes.Interface, logf logging.FormatLogger, + domain string, resolvable bool, opts ...spoof.TransportOption) (*spoof.SpoofingClient, error) { + return spoof.New(ctx, client, logf, domain, resolvable, Flags.IngressEndpoint, + Flags.SpoofRequestInterval, Flags.SpoofRequestTimeout, opts...) +} + +// BuildClientConfig builds the client config specified by the config path and the cluster name +func BuildClientConfig(kubeConfigPath string, clusterName string) (*rest.Config, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + overrides := clientcmd.ConfigOverrides{} + + if kubeConfigPath != "" { + loadingRules.ExplicitPath = kubeConfigPath + } + // Override the cluster name if provided. 
+ if clusterName != "" { + overrides.Context.Cluster = clusterName + } + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loadingRules, + &overrides).ClientConfig() +} + +// UpdateConfigMap updates the config map for specified @name with values +func UpdateConfigMap(ctx context.Context, client kubernetes.Interface, name string, configName string, values map[string]string) error { + configMap, err := client.CoreV1().ConfigMaps(name).Get(ctx, configName, metav1.GetOptions{}) + if err != nil { + return err + } + + for key, value := range values { + configMap.Data[key] = value + } + + _, err = client.CoreV1().ConfigMaps(name).Update(ctx, configMap, metav1.UpdateOptions{}) + return err +} + +// CreatePod will create a Pod +func CreatePod(ctx context.Context, client kubernetes.Interface, pod *corev1.Pod) (*corev1.Pod, error) { + pods := client.CoreV1().Pods(pod.GetNamespace()) + return pods.Create(ctx, pod, metav1.CreateOptions{}) +} + +// PodLogs returns Pod logs for given Pod and Container in the namespace +func PodLogs(ctx context.Context, client kubernetes.Interface, podName, containerName, namespace string) ([]byte, error) { + pods := client.CoreV1().Pods(namespace) + podList, err := pods.List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + for i := range podList.Items { + // Pods are big, so avoid copying. + pod := &podList.Items[i] + if strings.Contains(pod.Name, podName) { + result := pods.GetLogs(pod.Name, &corev1.PodLogOptions{ + Container: containerName, + }).Do(ctx) + return result.Raw() + } + } + return nil, fmt.Errorf("could not find logs for %s/%s:%s", namespace, podName, containerName) +} diff --git a/vendor/knative.dev/pkg/test/crd.go b/vendor/knative.dev/pkg/test/crd.go new file mode 100644 index 00000000..eb7c0e0b --- /dev/null +++ b/vendor/knative.dev/pkg/test/crd.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains functions that construct boilerplate CRD definitions. 
+ +package test + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + nginxPort = 80 + nginxName = "nginx" + nginxImage = "nginx:1.7.9" +) + +// ServiceAccount returns ServiceAccount object in given namespace +func ServiceAccount(name, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } +} + +// ClusterRoleBinding returns ClusterRoleBinding for given subject and role +func ClusterRoleBinding(name string, namespace string, serviceAccount string, role string) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccount, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: role, + APIGroup: "rbac.authorization.k8s.io", + }, + } +} + +// CoreV1ObjectReference returns a corev1.ObjectReference for the given name, kind and apiversion +func CoreV1ObjectReference(kind, apiversion, name string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: kind, + APIVersion: apiversion, + Name: name, + } +} + +// NginxPod returns nginx pod defined in given namespace +func NginxPod(namespace string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: nginxName, + Namespace: namespace, + Annotations: map[string]string{"sidecar.istio.io/inject": "true"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: nginxName, + Image: nginxImage, + Ports: []corev1.ContainerPort{ + { + ContainerPort: nginxPort, + }, + }, + }, + }, + }, + } +} diff --git a/vendor/knative.dev/pkg/test/e2e_flags.go b/vendor/knative.dev/pkg/test/e2e_flags.go new file mode 100644 index 00000000..ef9b5519 --- /dev/null +++ b/vendor/knative.dev/pkg/test/e2e_flags.go @@ -0,0 +1,81 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains logic to encapsulate flags which are needed to specify +// what cluster, etc. to use for e2e tests. + +package test + +import ( + "bytes" + "flag" + "text/template" + + env "knative.dev/pkg/environment" + testenv "knative.dev/pkg/test/environment" + "knative.dev/pkg/test/logging" +) + +var ( + // Flags holds the command line flags or defaults for settings in the user's environment. + // See EnvironmentFlags for a list of supported fields. + // Deprecated: use test/flags.Flags() + Flags = initializeFlags() +) + +// EnvironmentFlags define the flags that are needed to run the e2e tests. 
+// Deprecated: use test/flags.Flags() or injection.Flags() +type EnvironmentFlags struct { + env.ClientConfig + testenv.TestClientConfig +} + +func initializeFlags() *EnvironmentFlags { + f := new(EnvironmentFlags) + + f.ClientConfig.InitFlags(flag.CommandLine) + f.TestClientConfig.InitFlags(flag.CommandLine) + + return f +} + +// SetupLoggingFlags initializes a logger for tests. +// TODO(coryrc): Remove once other repos are moved to call logging.InitializeLogger() directly +func SetupLoggingFlags() { + logging.InitializeLogger() +} + +// ImagePath is a helper function to transform an image name into an image reference that can be pulled. +func ImagePath(name string) string { + tpl, err := template.New("image").Parse(Flags.ImageTemplate) + if err != nil { + panic("could not parse image template: " + err.Error()) + } + + var buf bytes.Buffer + if err := tpl.Execute(&buf, struct { + Repository string + Name string + Tag string + }{ + Repository: Flags.DockerRepo, + Name: name, + Tag: Flags.Tag, + }); err != nil { + panic("could not apply the image template: " + err.Error()) + } + return buf.String() +} diff --git a/vendor/knative.dev/pkg/test/environment/config.go b/vendor/knative.dev/pkg/test/environment/config.go new file mode 100644 index 00000000..6591a463 --- /dev/null +++ b/vendor/knative.dev/pkg/test/environment/config.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains logic to encapsulate flags which are needed to specify +// what cluster, etc. to use for e2e tests. + +package environment + +import ( + "flag" + "os" + "time" +) + +// TestClientConfig defines propertis about the test environment +type TestClientConfig struct { + Namespace string // K8s namespace (blank by default, to be overwritten by test suite) + IngressEndpoint string // Host to use for ingress endpoint + ImageTemplate string // Template to build the image reference (defaults to {{.Repository}}/{{.Name}}:{{.Tag}}) + DockerRepo string // Docker repo (defaults to $KO_DOCKER_REPO) + Tag string // Tag for test images + SpoofRequestInterval time.Duration // SpoofRequestInterval is the interval between requests in SpoofingClient + SpoofRequestTimeout time.Duration // SpoofRequestTimeout is the timeout for polling requests in SpoofingClient +} + +// InitFlags is for explicitly initializing the flags. +func (c *TestClientConfig) InitFlags(fs *flag.FlagSet) { + fs.StringVar(&c.Namespace, "namespace", "", + "Provide the namespace you would like to use for these tests.") + + fs.StringVar(&c.IngressEndpoint, "ingressendpoint", "", "Provide a static endpoint url to the ingress server used during tests.") + + fs.StringVar(&c.ImageTemplate, "imagetemplate", "{{.Repository}}/{{.Name}}:{{.Tag}}", + "Provide a template to generate the reference to an image from the test. 
Defaults to `{{.Repository}}/{{.Name}}:{{.Tag}}`.") + + fs.DurationVar(&c.SpoofRequestInterval, "spoofinterval", 1*time.Second, + "Provide an interval between requests for the SpoofingClient") + + fs.DurationVar(&c.SpoofRequestTimeout, "spooftimeout", 5*time.Minute, + "Provide a request timeout for the SpoofingClient") + + defaultRepo := os.Getenv("KO_DOCKER_REPO") + fs.StringVar(&c.DockerRepo, "dockerrepo", defaultRepo, + "Provide the uri of the docker repo you have uploaded the test image to using `uploadtestimage.sh`. Defaults to $KO_DOCKER_REPO") + + fs.StringVar(&c.Tag, "tag", "latest", "Provide the version tag for the test images.") +} diff --git a/vendor/knative.dev/pkg/test/ingress/ingress.go b/vendor/knative.dev/pkg/test/ingress/ingress.go new file mode 100644 index 00000000..8de4052c --- /dev/null +++ b/vendor/knative.dev/pkg/test/ingress/ingress.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "context" + "fmt" + "os" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +const ( + // TODO(tcnghia): These probably shouldn't be hard-coded here? + istioIngressNamespace = "istio-system" + istioIngressName = "istio-ingressgateway" +) + +// GetIngressEndpoint gets the ingress public IP or hostname. +// address - is the endpoint to which we should actually connect. +// portMap - translates the request's port to the port on address to which the caller +// +// should connect. This is used when the resolution to address goes through some +// sort of port-mapping, e.g. Kubernetes node ports. +// +// err - an error when address/portMap cannot be established. +func GetIngressEndpoint(ctx context.Context, kubeClientset kubernetes.Interface, endpointOverride string) (address string, portMap func(string) string, err error) { + ingressName := istioIngressName + if gatewayOverride := os.Getenv("GATEWAY_OVERRIDE"); gatewayOverride != "" { + ingressName = gatewayOverride + } + ingressNamespace := istioIngressNamespace + if gatewayNsOverride := os.Getenv("GATEWAY_NAMESPACE_OVERRIDE"); gatewayNsOverride != "" { + ingressNamespace = gatewayNsOverride + } + + ingress, err := kubeClientset.CoreV1().Services(ingressNamespace).Get(ctx, ingressName, metav1.GetOptions{}) + if err != nil { + return "", nil, err + } + + // If an override is provided, use it + if endpointOverride != "" { + return endpointOverride, func(port string) string { + for _, sp := range ingress.Spec.Ports { + if fmt.Sprint(sp.Port) == port { + return fmt.Sprint(sp.NodePort) + } + } + return port + }, nil + } + endpoint, err := EndpointFromService(ingress) + if err != nil { + return "", nil, err + } + return endpoint, func(in string) string { return in }, nil +} + +// EndpointFromService extracts the endpoint from the service's ingress. 
+func EndpointFromService(svc *v1.Service) (string, error) { + ingresses := svc.Status.LoadBalancer.Ingress + if len(ingresses) != 1 { + return "", fmt.Errorf("expected exactly one ingress load balancer, instead had %d: %v", len(ingresses), ingresses) + } + itu := ingresses[0] + + switch { + case itu.IP != "": + return itu.IP, nil + case itu.Hostname != "": + return itu.Hostname, nil + default: + return "", fmt.Errorf("expected ingress loadbalancer IP or hostname for %s to be set, instead was empty", svc.Name) + } +} diff --git a/vendor/knative.dev/pkg/test/kube_checks.go b/vendor/knative.dev/pkg/test/kube_checks.go new file mode 100644 index 00000000..3e295a55 --- /dev/null +++ b/vendor/knative.dev/pkg/test/kube_checks.go @@ -0,0 +1,282 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// kube_checks contains functions which poll Kubernetes objects until +// they get into the state desired by the caller or time out. + +package test + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + k8styped "k8s.io/client-go/kubernetes/typed/core/v1" + + "knative.dev/pkg/test/logging" +) + +const ( + interval = 1 * time.Second + podTimeout = 8 * time.Minute + logTimeout = 1 * time.Minute +) + +// WaitForDeploymentState polls the status of the Deployment called name +// from client every interval until inState returns `true` indicating it +// is done, returns an error or timeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. +func WaitForDeploymentState(ctx context.Context, client kubernetes.Interface, name string, inState func(d *appsv1.Deployment) (bool, error), desc string, namespace string, timeout time.Duration) error { + d := client.AppsV1().Deployments(namespace) + span := logging.GetEmitableSpan(ctx, fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc)) + defer span.End() + var lastState *appsv1.Deployment + waitErr := wait.PollImmediate(interval, timeout, func() (bool, error) { + var err error + lastState, err = d.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("deployment %q is not in desired state, got: %s: %w", name, spew.Sprint(lastState), waitErr) + } + return nil +} + +// WaitForPodListState polls the status of the PodList +// from client every interval until inState returns `true` indicating it +// is done, returns an error or timeout. desc will be used to name the metric +// that is emitted to track how long it took to get into the state checked by inState. 
+func WaitForPodListState(ctx context.Context, client kubernetes.Interface, inState func(p *corev1.PodList) (bool, error), desc string, namespace string) error { + p := client.CoreV1().Pods(namespace) + span := logging.GetEmitableSpan(ctx, "WaitForPodListState/"+desc) + defer span.End() + + var lastState *corev1.PodList + waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) { + var err error + lastState, err = p.List(ctx, metav1.ListOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("pod list is not in desired state, got: %s: %w", spew.Sprint(lastState), waitErr) + } + return nil +} + +// WaitForPodState polls the status of the specified Pod +// from client every interval until inState returns `true` indicating it +// is done, returns an error or timeout. desc will be used to name the metric +// that is emitted to track how long it took to get into the state checked by inState. +func WaitForPodState(ctx context.Context, client kubernetes.Interface, inState func(p *corev1.Pod) (bool, error), name string, namespace string) error { + p := client.CoreV1().Pods(namespace) + span := logging.GetEmitableSpan(ctx, "WaitForPodState/"+name) + defer span.End() + + var lastState *corev1.Pod + waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) { + var err error + lastState, err = p.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("pod %q is not in desired state, got: %s: %w", name, spew.Sprint(lastState), waitErr) + } + return nil +} + +// WaitForPodDeleted waits for the given pod to disappear from the given namespace. +func WaitForPodDeleted(ctx context.Context, client kubernetes.Interface, name, namespace string) error { + if err := WaitForPodState(ctx, client, func(p *corev1.Pod) (bool, error) { + // Always return false. We're oly interested in the error which indicates pod deletion or timeout. + return false, nil + }, name, namespace); err != nil { + if !apierrs.IsNotFound(err) { + return err + } + } + return nil +} + +// WaitForServiceEndpoints polls the status of the specified Service +// from client every interval until number of service endpoints = numOfEndpoints +func WaitForServiceEndpoints(ctx context.Context, client kubernetes.Interface, svcName string, svcNamespace string, numOfEndpoints int) error { + endpointsService := client.CoreV1().Endpoints(svcNamespace) + span := logging.GetEmitableSpan(ctx, "WaitForServiceHasAtLeastOneEndpoint/"+svcName) + defer span.End() + + var endpoints *corev1.Endpoints + waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) { + var err error + endpoints, err = endpointsService.Get(ctx, svcName, metav1.GetOptions{}) + if apierrs.IsNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + + return countEndpointsNum(endpoints) == numOfEndpoints, nil + }) + if waitErr != nil { + return fmt.Errorf("did not reach the desired number of endpoints, got: %d: %w", countEndpointsNum(endpoints), waitErr) + } + return nil +} + +func countEndpointsNum(e *corev1.Endpoints) int { + if e == nil || e.Subsets == nil { + return 0 + } + num := 0 + for _, sub := range e.Subsets { + num += len(sub.Addresses) + } + return num +} + +// GetEndpointAddresses returns addresses of endpoints for the given service. 
+func GetEndpointAddresses(ctx context.Context, client kubernetes.Interface, svcName, svcNamespace string) ([]string, error) { + endpoints, err := client.CoreV1().Endpoints(svcNamespace).Get(ctx, svcName, metav1.GetOptions{}) + if err != nil || countEndpointsNum(endpoints) == 0 { + return nil, fmt.Errorf("no endpoints or error: %w", err) + } + var hosts []string + for _, sub := range endpoints.Subsets { + for _, addr := range sub.Addresses { + hosts = append(hosts, addr.IP) + } + } + return hosts, nil +} + +// WaitForChangedEndpoints waits until the endpoints for the given service differ from origEndpoints. +func WaitForChangedEndpoints(ctx context.Context, client kubernetes.Interface, svcName, svcNamespace string, origEndpoints []string) error { + var newEndpoints []string + waitErr := wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) { + var err error + newEndpoints, err = GetEndpointAddresses(ctx, client, svcName, svcNamespace) + return !cmp.Equal(origEndpoints, newEndpoints), err + }) + if waitErr != nil { + return fmt.Errorf("new endpoints are not different from the original ones, got %q: %w", newEndpoints, waitErr) + } + return nil +} + +// GetConfigMap gets the configmaps for a given namespace +func GetConfigMap(client kubernetes.Interface, namespace string) k8styped.ConfigMapInterface { + return client.CoreV1().ConfigMaps(namespace) +} + +// DeploymentScaledToZeroFunc returns a func that evaluates if a deployment has scaled to 0 pods +func DeploymentScaledToZeroFunc() func(d *appsv1.Deployment) (bool, error) { + return func(d *appsv1.Deployment) (bool, error) { + return d.Status.ReadyReplicas == 0, nil + } +} + +// WaitForLogContent waits until logs for given Pod/Container include the given content. +// If the content is not present within timeout it returns error. +func WaitForLogContent(ctx context.Context, client kubernetes.Interface, podName, containerName, namespace, content string) error { + var logs []byte + waitErr := wait.PollImmediate(interval, logTimeout, func() (bool, error) { + var err error + logs, err = PodLogs(ctx, client, podName, containerName, namespace) + if err != nil { + return true, err + } + return strings.Contains(string(logs), content), nil + }) + if waitErr != nil { + return fmt.Errorf("logs do not contain the desired content %q, got %q: %w", content, logs, waitErr) + } + return nil +} + +// WaitForAllPodsRunning waits for all the pods to be in running state +func WaitForAllPodsRunning(ctx context.Context, client kubernetes.Interface, namespace string) error { + return WaitForPodListState(ctx, client, podsRunning, "PodsAreRunning", namespace) +} + +// WaitForPodRunning waits for the given pod to be in running state +func WaitForPodRunning(ctx context.Context, client kubernetes.Interface, name string, namespace string) error { + var p *corev1.Pod + pods := client.CoreV1().Pods(namespace) + waitErr := wait.PollImmediate(interval, podTimeout, func() (bool, error) { + var err error + p, err = pods.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return podRunning(p), nil + }) + if waitErr != nil { + return fmt.Errorf("pod %q did not reach the running state, got %+v: %w", name, p.Status.Phase, waitErr) + } + return nil +} + +// podsRunning will check the status conditions of the pod list and return true all pods are Running +func podsRunning(podList *corev1.PodList) (bool, error) { + // Pods are big, so use indexing, to avoid copying. 
+ for i := range podList.Items { + if isRunning := podRunning(&podList.Items[i]); !isRunning { + return false, nil + } + } + return true, nil +} + +// podRunning will check the status conditions of the pod and return true if it's Running. +func podRunning(pod *corev1.Pod) bool { + return pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded +} + +// WaitForDeploymentScale waits until the given deployment has the expected scale. +func WaitForDeploymentScale(ctx context.Context, client kubernetes.Interface, name, namespace string, scale int) error { + return WaitForDeploymentState( + ctx, + client, + name, + func(d *appsv1.Deployment) (bool, error) { + return d.Status.ReadyReplicas == int32(scale), nil + }, + "DeploymentIsScaled", + namespace, + time.Minute, + ) +} diff --git a/vendor/knative.dev/pkg/test/presubmit-tests.sh b/vendor/knative.dev/pkg/test/presubmit-tests.sh new file mode 100644 index 00000000..e03367a1 --- /dev/null +++ b/vendor/knative.dev/pkg/test/presubmit-tests.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the presubmit tests, in the right order. +# It is started by prow for each PR. +# For convenience, it can also be executed manually. + +export GO111MODULE=on + +source $(dirname $0)/../vendor/knative.dev/hack/presubmit-tests.sh + +# TODO(#17): Write integration tests. + +# We use the default build, unit and integration test runners. + +function pre_build_tests() { + # Test the custom code generators. This makes sure we can compile the output + # of the injection generators. + $(dirname $0)/test-reconciler-codegen.sh + return 0 +} + +main $@ diff --git a/vendor/knative.dev/pkg/test/request.go b/vendor/knative.dev/pkg/test/request.go new file mode 100644 index 00000000..a22542e2 --- /dev/null +++ b/vendor/knative.dev/pkg/test/request.go @@ -0,0 +1,160 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// request contains logic to make polling HTTP requests against an endpoint with optional host spoofing. + +package test + +import ( + "context" + "net/url" + "time" + + "k8s.io/client-go/kubernetes" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/spoof" +) + +// RequestOption enables configuration of requests +// when polling for endpoint states. +type RequestOption = spoof.RequestOption + +// WithHeader will add the provided headers to the request. 
+// +// Deprecated: Use the spoof package version +var WithHeader = spoof.WithHeader + +// IsOneOfStatusCodes checks that the response code is equal to the given one. +// +// Deprecated: Use the spoof package version +var IsOneOfStatusCodes = spoof.IsOneOfStatusCodes + +// IsStatusOK checks that the response code is a 200. +// +// Deprecated: Use the spoof package version +var IsStatusOK = spoof.IsStatusOK + +// MatchesAllBodies checks that the *first* response body matches the "expected" body, otherwise failing. +// +// Deprecated: Use the spoof package version +var MatchesAllBodies = spoof.MatchesAllBodies + +// MatchesBody checks that the *first* response body matches the "expected" body, otherwise failing. +// +// Deprecated: Use the spoof package version +var MatchesBody = spoof.MatchesBody + +// MatchesAllOf combines multiple ResponseCheckers to one ResponseChecker with a logical AND. The +// checkers are executed in order. The first function to trigger an error or a retry will short-circuit +// the other functions (they will not be executed). +// +// This is useful for combining a body with a status check like: +// MatchesAllOf(IsStatusOK, MatchesBody("test")) +// +// The MatchesBody check will only be executed after the IsStatusOK has passed. +// +// Deprecated: Use the spoof package version +var MatchesAllOf = spoof.MatchesAllOf + +// WaitForEndpointState will poll an endpoint until inState indicates the state is achieved, +// or default timeout is reached. +// If resolvableDomain is false, it will use kubeClientset to look up the ingress and spoof +// the domain in the request headers, otherwise it will make the request directly to domain. +// desc will be used to name the metric that is emitted to track how long it took for the +// domain to get into the state checked by inState. Commas in `desc` must be escaped. +func WaitForEndpointState( + ctx context.Context, + kubeClient kubernetes.Interface, + logf logging.FormatLogger, + url *url.URL, + inState spoof.ResponseChecker, + desc string, + resolvable bool, + opts ...interface{}) (*spoof.Response, error) { + return WaitForEndpointStateWithTimeout(ctx, kubeClient, logf, url, inState, + desc, resolvable, Flags.SpoofRequestTimeout, opts...) +} + +// WaitForEndpointStateWithTimeout will poll an endpoint until inState indicates the state is achieved +// or the provided timeout is achieved. +// If resolvableDomain is false, it will use kubeClientset to look up the ingress and spoof +// the domain in the request headers, otherwise it will make the request directly to domain. +// desc will be used to name the metric that is emitted to track how long it took for the +// domain to get into the state checked by inState. Commas in `desc` must be escaped. +func WaitForEndpointStateWithTimeout( + ctx context.Context, + kubeClient kubernetes.Interface, + logf logging.FormatLogger, + url *url.URL, + inState spoof.ResponseChecker, + desc string, + resolvable bool, + timeout time.Duration, + opts ...interface{}) (*spoof.Response, error) { + + client, rOpts, err := makeSpoofClient(ctx, kubeClient, logf, url, resolvable, timeout, opts...) + if err != nil { + return nil, err + } + return client.WaitForEndpointState(ctx, url, inState, desc, rOpts...) 
+} + +func makeSpoofClient( + ctx context.Context, + kubeClient kubernetes.Interface, + logf logging.FormatLogger, + url *url.URL, + resolvable bool, + timeout time.Duration, + opts ...interface{}) (*spoof.SpoofingClient, []spoof.RequestOption, error) { + + var tOpts []spoof.TransportOption + var rOpts []spoof.RequestOption + + for _, opt := range opts { + switch o := opt.(type) { + case spoof.RequestOption: + rOpts = append(rOpts, o) + case spoof.TransportOption: + tOpts = append(tOpts, o) + } + } + + client, err := NewSpoofingClient(ctx, kubeClient, logf, url.Hostname(), resolvable, tOpts...) + if err != nil { + return nil, nil, err + } + client.RequestTimeout = timeout + + return client, rOpts, nil +} + +func CheckEndpointState( + ctx context.Context, + kubeClient kubernetes.Interface, + logf logging.FormatLogger, + url *url.URL, + inState spoof.ResponseChecker, + desc string, + resolvable bool, + opts ...interface{}, +) (*spoof.Response, error) { + client, rOpts, err := makeSpoofClient(ctx, kubeClient, logf, url, resolvable, Flags.SpoofRequestTimeout, opts...) + if err != nil { + return nil, err + } + return client.CheckEndpointState(ctx, url, inState, desc, rOpts...) +} diff --git a/vendor/knative.dev/pkg/test/spoof/error_checks.go b/vendor/knative.dev/pkg/test/spoof/error_checks.go new file mode 100644 index 00000000..b0638ad3 --- /dev/null +++ b/vendor/knative.dev/pkg/test/spoof/error_checks.go @@ -0,0 +1,70 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// spoof contains logic to make polling HTTP requests against an endpoint with optional host spoofing. + +package spoof + +import ( + "errors" + "net" + "net/http" + "strings" +) + +func isTCPTimeout(err error) bool { + if err == nil { + return false + } + var errNet net.Error + if !errors.As(err, &errNet) { + return false + } + return errNet.Timeout() +} + +func isDNSError(err error) bool { + if err == nil { + return false + } + // Checking by casting to url.Error and casting the nested error + // seems to be not as robust as string check. + msg := strings.ToLower(err.Error()) + // Example error message: + // > Get http://this.url.does.not.exist: dial tcp: lookup this.url.does.not.exist on 127.0.0.1:53: no such host + return strings.Contains(msg, "no such host") || strings.Contains(msg, ":53") +} + +func isConnectionRefused(err error) bool { + // The alternative for the string check is: + // errNo := (((err.(*url.Error)).Err.(*net.OpError)).Err.(*os.SyscallError).Err).(syscall.Errno) + // if errNo == syscall.Errno(0x6f) {...} + // But with assertions, of course. 
+ return err != nil && strings.Contains(err.Error(), "connect: connection refused") +} + +func isConnectionReset(err error) bool { + return err != nil && strings.Contains(err.Error(), "connection reset by peer") +} + +func isNoRouteToHostError(err error) bool { + return err != nil && strings.Contains(err.Error(), "connect: no route to host") +} + +func isResponseDNSError(resp *Response) bool { + // no such host with 502 is sent back from istio-ingressgateway when it fails to resolve domain. + return resp.StatusCode == http.StatusBadGateway && strings.Contains(string(resp.Body), "no such host") +} diff --git a/vendor/knative.dev/pkg/test/spoof/request.go b/vendor/knative.dev/pkg/test/spoof/request.go new file mode 100644 index 00000000..df26e8fa --- /dev/null +++ b/vendor/knative.dev/pkg/test/spoof/request.go @@ -0,0 +1,38 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spoof + +import "net/http" + +// RequestOption enables configuration of requests +// when polling for endpoint states. +type RequestOption func(*http.Request) + +// WithHeader will add the provided headers to the request. +func WithHeader(header http.Header) RequestOption { + return func(r *http.Request) { + if r.Header == nil { + r.Header = header + return + } + for key, values := range header { + for _, value := range values { + r.Header.Add(key, value) + } + } + } +} diff --git a/vendor/knative.dev/pkg/test/spoof/response_checks.go b/vendor/knative.dev/pkg/test/spoof/response_checks.go new file mode 100644 index 00000000..3e22fc7c --- /dev/null +++ b/vendor/knative.dev/pkg/test/spoof/response_checks.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spoof + +import ( + "fmt" + "net/http" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// MatchesBody checks that the *first* response body matches the "expected" body, otherwise failing. +func MatchesBody(expected string) ResponseChecker { + return func(resp *Response) (bool, error) { + if !strings.Contains(string(resp.Body), expected) { + // Returning (true, err) causes SpoofingClient.Poll to fail. + return true, fmt.Errorf("body = %s, want: %s", string(resp.Body), expected) + } + + return true, nil + } +} + +// MatchesAllOf combines multiple ResponseCheckers to one ResponseChecker with a logical AND. The +// checkers are executed in order. The first function to trigger an error or a retry will short-circuit +// the other functions (they will not be executed). 
+// +// This is useful for combining a body with a status check like: +// MatchesAllOf(IsStatusOK, MatchesBody("test")) +// +// The MatchesBody check will only be executed after the IsStatusOK has passed. +func MatchesAllOf(checkers ...ResponseChecker) ResponseChecker { + return func(resp *Response) (bool, error) { + for _, checker := range checkers { + if done, err := checker(resp); err != nil || !done { + return done, err + } + } + return true, nil + } +} + +// MatchesAllBodies checks that the *first* response body matches the "expected" body, otherwise failing. +func MatchesAllBodies(all ...string) ResponseChecker { + var m sync.Mutex + // This helps with two things: + // 1. we can use Equal on sets + // 2. it will collapse the duplicates + want := sets.NewString(all...) + seen := make(sets.String, len(all)) + + return func(resp *Response) (bool, error) { + bs := string(resp.Body) + for expected := range want { + if !strings.Contains(bs, expected) { + // See if the next one matches. + continue + } + + m.Lock() + defer m.Unlock() + seen.Insert(expected) + + // Stop once we've seen them all. + return want.Equal(seen), nil + } + + // Returning (true, err) causes SpoofingClient.Poll to fail. + return true, fmt.Errorf("body = %s, want one of: %s", bs, all) + } +} + +// IsStatusOK checks that the response code is a 200. +func IsStatusOK(resp *Response) (bool, error) { + return IsOneOfStatusCodes(http.StatusOK)(resp) +} + +// IsOneOfStatusCodes checks that the response code is equal to the given one. +func IsOneOfStatusCodes(codes ...int) ResponseChecker { + return func(resp *Response) (bool, error) { + for _, code := range codes { + if resp.StatusCode == code { + return true, nil + } + } + + return true, fmt.Errorf("status = %d %s, want one of: %v", resp.StatusCode, resp.Status, codes) + } +} diff --git a/vendor/knative.dev/pkg/test/spoof/spoof.go b/vendor/knative.dev/pkg/test/spoof/spoof.go new file mode 100644 index 00000000..319e2ead --- /dev/null +++ b/vendor/knative.dev/pkg/test/spoof/spoof.go @@ -0,0 +1,360 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// spoof contains logic to make polling HTTP requests against an endpoint with optional host spoofing. + +package spoof + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "knative.dev/pkg/test/ingress" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/zipkin" + "knative.dev/pkg/tracing/propagation/tracecontextb3" + + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/trace" +) + +// Response is a stripped down subset of http.Response. The is primarily useful +// for ResponseCheckers to inspect the response body without consuming it. +// Notably, Body is a byte slice instead of an io.ReadCloser. 
+type Response struct { + Status string + StatusCode int + Header http.Header + Body []byte +} + +func (r *Response) String() string { + return fmt.Sprintf("status: %d, body: %s, headers: %v", r.StatusCode, string(r.Body), r.Header) +} + +// https://medium.com/stupid-gopher-tricks/ensuring-go-interface-satisfaction-at-compile-time-1ed158e8fa17 +var dialContext = (&net.Dialer{}).DialContext + +// ResponseChecker is used to determine when SpoofingClient.Poll is done polling. +// This allows you to predicate wait.PollImmediate on the request's http.Response. +// +// See the apimachinery wait package: +// https://github.com/kubernetes/apimachinery/blob/cf7ae2f57dabc02a3d215f15ca61ae1446f3be8f/pkg/util/wait/wait.go#L172 +type ResponseChecker func(resp *Response) (done bool, err error) + +// ErrorRetryChecker is used to determine if an error should be retried or not. +// If an error should be retried, it should return true and the wrapped error to explain why to retry. +type ErrorRetryChecker func(e error) (retry bool, err error) + +// ResponseRetryChecker is used to determine if a response should be retried or not. +// If a response should be retried, it should return true and an error to explain why to retry. +// +// This is distinct from ResponseChecker in that it shall be used to retry responses, +// where the HTTP request was technically successful (it returned something) but indicates +// an error (e.g. the overload page of a loadbalancer). +type ResponseRetryChecker func(resp *Response) (retry bool, err error) + +// SpoofingClient is a minimal HTTP client wrapper that spoofs the domain of requests +// for non-resolvable domains. +type SpoofingClient struct { + Client *http.Client + RequestInterval time.Duration + RequestTimeout time.Duration + Logf logging.FormatLogger +} + +// TransportOption allows callers to customize the http.Transport used by a SpoofingClient +type TransportOption func(transport *http.Transport) *http.Transport + +// New returns a SpoofingClient that rewrites requests if the target domain is not `resolvable`. +// It does this by looking up the ingress at construction time, so reusing a client will not +// follow the ingress if it moves (or if there are multiple ingresses). +// +// If that's a problem, see test/request.go#WaitForEndpointState for oneshot spoofing. +func New( + ctx context.Context, + kubeClientset kubernetes.Interface, + logf logging.FormatLogger, + domain string, + resolvable bool, + endpointOverride string, + requestInterval, requestTimeout time.Duration, + opts ...TransportOption) (*SpoofingClient, error) { + endpoint, mapper, err := ResolveEndpoint(ctx, kubeClientset, domain, resolvable, endpointOverride) + if err != nil { + return nil, fmt.Errorf("failed to get the cluster endpoint: %w", err) + } + + // Spoof the hostname at the resolver level + logf("Spoofing %s -> %s", domain, endpoint) + transport := &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (conn net.Conn, e error) { + _, port, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + // The original hostname:port is spoofed by replacing the hostname by the value + // returned by ResolveEndpoint. 
+ return dialContext(ctx, network, net.JoinHostPort(endpoint, mapper(port))) + }, + } + + for _, opt := range opts { + transport = opt(transport) + } + + // Enable Zipkin tracing + roundTripper := &ochttp.Transport{ + Base: transport, + Propagation: tracecontextb3.TraceContextB3Egress, + } + + sc := &SpoofingClient{ + Client: &http.Client{Transport: roundTripper}, + RequestInterval: requestInterval, + RequestTimeout: requestTimeout, + Logf: logf, + } + return sc, nil +} + +// ResolveEndpoint resolves the endpoint address considering whether the domain is resolvable and taking into +// account whether the user overrode the endpoint address externally +func ResolveEndpoint(ctx context.Context, kubeClientset kubernetes.Interface, domain string, resolvable bool, endpointOverride string) (string, func(string) string, error) { + id := func(in string) string { return in } + // If the domain is resolvable, it can be used directly + if resolvable { + return domain, id, nil + } + // Otherwise, use the actual cluster endpoint + return ingress.GetIngressEndpoint(ctx, kubeClientset, endpointOverride) +} + +// Do dispatches to the underlying http.Client.Do, spoofing domains as needed +// and transforming the http.Response into a spoof.Response. +// Each response is augmented with "ZipkinTraceID" header that identifies the zipkin trace corresponding to the request. +func (sc *SpoofingClient) Do(req *http.Request, errorRetryCheckers ...interface{}) (*Response, error) { + return sc.Poll(req, func(*Response) (bool, error) { return true, nil }, errorRetryCheckers...) +} + +// Poll executes an http request until it satisfies the inState condition or, if there's an error, +// none of the error retry checkers permit a retry. +// If no retry checkers are specified `DefaultErrorRetryChecker` will be used. +func (sc *SpoofingClient) Poll(req *http.Request, inState ResponseChecker, checkers ...interface{}) (*Response, error) { + if len(checkers) == 0 { + checkers = []interface{}{ErrorRetryChecker(DefaultErrorRetryChecker), ResponseRetryChecker(DefaultResponseRetryChecker)} + } + + var resp *Response + err := wait.PollImmediate(sc.RequestInterval, sc.RequestTimeout, func() (bool, error) { + // Starting span to capture zipkin trace. + traceContext, span := trace.StartSpan(req.Context(), "SpoofingClient-Trace") + defer span.End() + rawResp, err := sc.Client.Do(req.WithContext(traceContext)) + if err != nil { + for _, checker := range checkers { + if ec, ok := checker.(ErrorRetryChecker); ok { + retry, newErr := ec(err) + if retry { + sc.Logf("Retrying %s: %v", req.URL.String(), newErr) + return false, nil + } + } + } + sc.Logf("NOT Retrying %s: %v", req.URL.String(), err) + return true, err + } + defer rawResp.Body.Close() + + body, err := io.ReadAll(rawResp.Body) + if err != nil { + return true, err + } + rawResp.Header.Add(zipkin.ZipkinTraceIDHeader, span.SpanContext().TraceID.String()) + + resp = &Response{ + Status: rawResp.Status, + StatusCode: rawResp.StatusCode, + Header: rawResp.Header, + Body: body, + } + + // This is distinct from inState in that it allows us to uniformly check for + // error responses to retry HTTP requests that have technically been successful, + // but haven't reached their destination (e.g. got a loadbalancer overload page). 
+ for _, checker := range checkers { + if rc, ok := checker.(ResponseRetryChecker); ok { + retry, newErr := rc(resp) + if retry { + sc.Logf("Retrying %s: %v", req.URL.String(), newErr) + return false, nil + } + } + } + + return inState(resp) + }) + + if resp != nil { + sc.logZipkinTrace(resp) + } + + if err != nil { + return resp, fmt.Errorf("response: %s did not pass checks: %w", resp, err) + } + return resp, nil +} + +// DefaultErrorRetryChecker implements the defaults for retrying on error. +func DefaultErrorRetryChecker(err error) (bool, error) { + if isTCPTimeout(err) { + return true, fmt.Errorf("retrying for TCP timeout: %w", err) + } + // Retrying on DNS error, since we may be using sslip.io or nip.io in tests. + if isDNSError(err) { + return true, fmt.Errorf("retrying for DNS error: %w", err) + } + // Repeat the poll on `connection refused` errors, which are usually transient Istio errors. + if isConnectionRefused(err) { + return true, fmt.Errorf("retrying for connection refused: %w", err) + } + if isConnectionReset(err) { + return true, fmt.Errorf("retrying for connection reset: %w", err) + } + // Retry on connection/network errors. + if errors.Is(err, io.EOF) { + return true, fmt.Errorf("retrying for: %w", err) + } + // No route to host errors are in the same category as connection refused errors and + // are usually transient. + if isNoRouteToHostError(err) { + return true, fmt.Errorf("retrying for 'no route to host' error: %w", err) + } + return false, err +} + +// DefaultResponseRetryChecker implements the defaults for retrying on response. +func DefaultResponseRetryChecker(resp *Response) (bool, error) { + if isResponseDNSError(resp) { + return true, fmt.Errorf("retrying for DNS related failure response: %v", resp) + } + return false, nil +} + +// logZipkinTrace provides support to log Zipkin Trace for param: spoofResponse +// We only log Zipkin trace for HTTP server errors i.e for HTTP status codes between 500 to 600 +func (sc *SpoofingClient) logZipkinTrace(spoofResp *Response) { + if !zipkin.IsTracingEnabled() || spoofResp.StatusCode < http.StatusInternalServerError || spoofResp.StatusCode >= 600 { + return + } + + traceID := spoofResp.Header.Get(zipkin.ZipkinTraceIDHeader) + sc.Logf("Logging Zipkin Trace for: %s", traceID) + + json, err := zipkin.JSONTrace(traceID /* We don't know the expected number of spans */, -1, 5*time.Second) + if err != nil { + var errTimeout *zipkin.TimeoutError + if !errors.As(err, &errTimeout) { + sc.Logf("Error getting zipkin trace: %v", err) + } + } + + sc.Logf("%s", json) +} + +func (sc *SpoofingClient) WaitForEndpointState( + ctx context.Context, + url *url.URL, + inState ResponseChecker, + desc string, + opts ...RequestOption) (*Response, error) { + + return sc.endpointState( + ctx, + url, + inState, + desc, + func(req *http.Request, check ResponseChecker) (*Response, error) { return sc.Poll(req, check) }, + "WaitForEndpointState", + opts...) 
+} + +func (sc *SpoofingClient) endpointState( + ctx context.Context, + url *url.URL, + inState ResponseChecker, + desc string, + f func(*http.Request, ResponseChecker) (*Response, error), + logName string, + opts ...RequestOption) (*Response, error) { + defer logging.GetEmitableSpan(ctx, logName+"/"+desc).End() + + if url.Scheme == "" || url.Host == "" { + return nil, fmt.Errorf("invalid URL: %q", url.String()) + } + + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return nil, err + } + + for _, opt := range opts { + opt(req) + } + + return f(req, inState) +} + +func (sc *SpoofingClient) Check(req *http.Request, inState ResponseChecker, checkers ...interface{}) (*Response, error) { + resp, err := sc.Do(req, checkers...) + if err != nil { + return nil, err + } + + ok, err := inState(resp) + if err != nil { + return resp, fmt.Errorf("response: %s did not pass checks: %w", resp, err) + } + if ok { + return resp, nil + } + + return nil, err +} + +func (sc *SpoofingClient) CheckEndpointState( + ctx context.Context, + url *url.URL, + inState ResponseChecker, + desc string, + opts ...RequestOption) (*Response, error) { + return sc.endpointState( + ctx, + url, + inState, + desc, + func(req *http.Request, check ResponseChecker) (*Response, error) { return sc.Check(req, check) }, + "CheckEndpointState", + opts...) +} diff --git a/vendor/knative.dev/pkg/test/test-reconciler-codegen.sh b/vendor/knative.dev/pkg/test/test-reconciler-codegen.sh new file mode 100644 index 00000000..c6bb3e20 --- /dev/null +++ b/vendor/knative.dev/pkg/test/test-reconciler-codegen.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# Copyright 2020 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +source $(dirname $0)/../vendor/knative.dev/hack/library.sh + +CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 $(dirname $0)/../vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +KNATIVE_CODEGEN_PKG=${KNATIVE_CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 $(dirname $0)/../vendor/knative.dev/pkg 2>/dev/null || echo ../pkg)} + +GENCLIENT_PKG=knative.dev/pkg/test/genclient + +echo "Pre-deleting $(dirname $0)/genclient" +rm -rf $(dirname $0)/genclient + +header "Test Generated Reconciler Builds." 
+ +chmod +x ${CODEGEN_PKG}/generate-groups.sh + +${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + ${GENCLIENT_PKG} knative.dev/pkg/apis/test \ + "example:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Knative Injection +${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ + ${GENCLIENT_PKG} knative.dev/pkg/apis/test \ + "example:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt \ + --force-genreconciler-kinds "Foo" + +${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + ${GENCLIENT_PKG}/pub knative.dev/pkg/apis/test \ + "pub:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Knative Injection +${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ + ${GENCLIENT_PKG}/pub knative.dev/pkg/apis/test \ + "pub:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +if ! go build -v $(dirname $0)/genclient/... ; then + exit 1 +fi + +echo "Finished, deleting $(dirname $0)/genclient" +rm -rf $(dirname $0)/genclient diff --git a/vendor/knative.dev/pkg/test/tinterface.go b/vendor/knative.dev/pkg/test/tinterface.go new file mode 100644 index 00000000..530ff0a2 --- /dev/null +++ b/vendor/knative.dev/pkg/test/tinterface.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Defines an interface of commonality between testing.T and logging.TLogger +// Allows most library functions to be shared +// Simplifies coexistence with TLogger + +package test + +// T is an interface mimicking *testing.T. +// Deprecated: Do not use this. Define your own interface. +type T interface { + Name() string + Helper() + SkipNow() + Cleanup(func()) + Log(args ...interface{}) + Error(args ...interface{}) +} + +// TLegacy is an interface mimicking *testing.T. +// Deprecated: Do not use this. Define your own interface. +type TLegacy interface { + T + Logf(fmt string, args ...interface{}) // It gets passed to things in logstream + Fatal(args ...interface{}) +} diff --git a/vendor/knative.dev/pkg/webhook/OWNERS b/vendor/knative.dev/pkg/webhook/OWNERS new file mode 100644 index 00000000..64660c9e --- /dev/null +++ b/vendor/knative.dev/pkg/webhook/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- serving-writers + +reviewers: +- serving-reviewers diff --git a/vendor/modules.txt b/vendor/modules.txt index 426d203a..673e03cd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -978,6 +978,7 @@ knative.dev/eventing/hack knative.dev/eventing/pkg/apis/config knative.dev/eventing/pkg/apis/duck knative.dev/eventing/pkg/apis/duck/v1 +knative.dev/eventing/pkg/apis/duck/v1beta1 knative.dev/eventing/pkg/apis/eventing knative.dev/eventing/pkg/apis/eventing/v1 knative.dev/eventing/pkg/apis/eventing/v1beta1 @@ -1025,6 +1026,7 @@ knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2 knative.dev/eventing/pkg/client/injection/client knative.dev/eventing/pkg/client/injection/client/fake knative.dev/eventing/pkg/client/injection/informers/eventing/v1/broker +knative.dev/eventing/pkg/client/injection/informers/eventing/v1/trigger knative.dev/eventing/pkg/client/injection/informers/eventing/v1beta2/eventtype knative.dev/eventing/pkg/client/injection/informers/factory knative.dev/eventing/pkg/client/injection/reconciler/eventing/v1/broker @@ -1040,6 +1042,7 @@ knative.dev/eventing/pkg/reconciler/testing knative.dev/eventing/pkg/reconciler/testing/scheme knative.dev/eventing/pkg/reconciler/testing/v1 knative.dev/eventing/pkg/reconciler/testing/v1beta2 +knative.dev/eventing/test/lib/resources # knative.dev/hack v0.0.0-20231122182901-eb352426ecc1 ## explicit; go 1.18 knative.dev/hack @@ -1095,12 +1098,16 @@ knative.dev/pkg/resolver knative.dev/pkg/signals knative.dev/pkg/system knative.dev/pkg/system/testing +knative.dev/pkg/test +knative.dev/pkg/test/environment knative.dev/pkg/test/helpers +knative.dev/pkg/test/ingress knative.dev/pkg/test/logging knative.dev/pkg/test/mako/config knative.dev/pkg/test/mako/stub-sidecar knative.dev/pkg/test/monitoring knative.dev/pkg/test/security +knative.dev/pkg/test/spoof knative.dev/pkg/test/zipkin knative.dev/pkg/third_party/mako/proto/quickstore_go_proto knative.dev/pkg/tracing diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS new file mode 100644 index 00000000..0fadafbd --- /dev/null +++ b/vendor/sigs.k8s.io/json/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - deads2k + - lavalamp + - liggitt diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS new file mode 100644 index 00000000..003a149e --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -0,0 +1,23 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS new file mode 100644 index 00000000..73be0a3a --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS @@ -0,0 +1,24 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +- natasha41575 +- knverey +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery
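
Illustrative sketch (not part of the diff above): the vendored knative.dev/pkg/test and knative.dev/pkg/test/spoof helpers are typically combined in an e2e test roughly as follows. Only the helper functions and their signatures come from the vendored files above; the kube client wiring, deployment/service names, namespace, hostname, and expected body are placeholder assumptions for illustration.

package e2e

import (
	"context"
	"net/url"
	"testing"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes"

	pkgtest "knative.dev/pkg/test"
	"knative.dev/pkg/test/spoof"
)

// waitAndProbe is a hypothetical helper: it waits for a deployment to become
// ready, waits for its service to expose one endpoint, and then polls the
// public URL (spoofed through the cluster ingress) until it returns 200 with
// the expected body. Names, namespace, host, and body are placeholders.
func waitAndProbe(ctx context.Context, t *testing.T, client kubernetes.Interface) error {
	// Poll the Deployment until at least one replica is ready.
	if err := pkgtest.WaitForDeploymentState(ctx, client, "my-app",
		func(d *appsv1.Deployment) (bool, error) { return d.Status.ReadyReplicas > 0, nil },
		"DeploymentIsReady", "my-namespace", 2*time.Minute); err != nil {
		return err
	}

	// Poll the Service until it has exactly one endpoint address.
	if err := pkgtest.WaitForServiceEndpoints(ctx, client, "my-app", "my-namespace", 1); err != nil {
		return err
	}

	// Poll the endpoint through the ingress (resolvable=false means the host is
	// spoofed) until it answers 200 and the body contains "Hello".
	u := &url.URL{Scheme: "http", Host: "my-app.my-namespace.example.com"}
	_, err := pkgtest.WaitForEndpointState(ctx, client, t.Logf, u,
		spoof.MatchesAllOf(spoof.IsStatusOK, spoof.MatchesBody("Hello")),
		"EndpointServesHello", false)
	return err
}

The vendored request.go also provides WaitForEndpointStateWithTimeout for an explicit timeout and CheckEndpointState for a single state check with the same arguments.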