diff --git a/apis/anomaly/v1alpha1/zz_detection_terraformed.go b/apis/anomaly/v1alpha1/zz_detection_terraformed.go new file mode 100755 index 0000000..b17f492 --- /dev/null +++ b/apis/anomaly/v1alpha1/zz_detection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Detection +func (mg *Detection) GetTerraformResourceType() string { + return "opensearch_anomaly_detection" +} + +// GetConnectionDetailsMapping for this Detection +func (tr *Detection) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Detection +func (tr *Detection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Detection +func (tr *Detection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Detection +func (tr *Detection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Detection +func (tr *Detection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Detection +func (tr *Detection) SetParameters(params map[string]any) error { 
+ p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Detection +func (tr *Detection) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Detection +func (tr *Detection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Detection using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Detection) LateInitialize(attrs []byte) (bool, error) { + params := &DetectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Detection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/anomaly/v1alpha1/zz_detection_types.go b/apis/anomaly/v1alpha1/zz_detection_types.go new file mode 100755 index 0000000..74cb04e --- /dev/null +++ b/apis/anomaly/v1alpha1/zz_detection_types.go @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DetectionInitParameters struct { + + // (String) The anomaly detection document + // The anomaly detection document + Body *string `json:"body,omitempty" tf:"body,omitempty"` +} + +type DetectionObservation struct { + + // (String) The anomaly detection document + // The anomaly detection document + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type DetectionParameters struct { + + // (String) The anomaly detection document + // The anomaly detection document + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` +} + +// DetectionSpec defines the desired state of Detection +type DetectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DetectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DetectionInitParameters `json:"initProvider,omitempty"` +} + +// DetectionStatus defines the observed state of Detection. +type DetectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DetectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Detection is the Schema for the Detections API. Provides an OpenSearch anonaly detection. Please refer to the OpenSearch anomaly detection documentation for details. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Detection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + Spec DetectionSpec `json:"spec"` + Status DetectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DetectionList contains a list of Detections +type DetectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Detection `json:"items"` +} + +// Repository type metadata. +var ( + Detection_Kind = "Detection" + Detection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Detection_Kind}.String() + Detection_KindAPIVersion = Detection_Kind + "." 
+ CRDGroupVersion.String() + Detection_GroupVersionKind = CRDGroupVersion.WithKind(Detection_Kind) +) + +func init() { + SchemeBuilder.Register(&Detection{}, &DetectionList{}) +} diff --git a/apis/anomaly/v1alpha1/zz_generated.conversion_hubs.go b/apis/anomaly/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..745b404 --- /dev/null +++ b/apis/anomaly/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Detection) Hub() {} diff --git a/apis/anomaly/v1alpha1/zz_generated.deepcopy.go b/apis/anomaly/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..a0b10ac --- /dev/null +++ b/apis/anomaly/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,168 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Detection) DeepCopyInto(out *Detection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Detection. +func (in *Detection) DeepCopy() *Detection { + if in == nil { + return nil + } + out := new(Detection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Detection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DetectionInitParameters) DeepCopyInto(out *DetectionInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectionInitParameters. +func (in *DetectionInitParameters) DeepCopy() *DetectionInitParameters { + if in == nil { + return nil + } + out := new(DetectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectionList) DeepCopyInto(out *DetectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Detection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectionList. +func (in *DetectionList) DeepCopy() *DetectionList { + if in == nil { + return nil + } + out := new(DetectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DetectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectionObservation) DeepCopyInto(out *DetectionObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectionObservation. 
+func (in *DetectionObservation) DeepCopy() *DetectionObservation { + if in == nil { + return nil + } + out := new(DetectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectionParameters) DeepCopyInto(out *DetectionParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectionParameters. +func (in *DetectionParameters) DeepCopy() *DetectionParameters { + if in == nil { + return nil + } + out := new(DetectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectionSpec) DeepCopyInto(out *DetectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectionSpec. +func (in *DetectionSpec) DeepCopy() *DetectionSpec { + if in == nil { + return nil + } + out := new(DetectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectionStatus) DeepCopyInto(out *DetectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectionStatus. 
+func (in *DetectionStatus) DeepCopy() *DetectionStatus { + if in == nil { + return nil + } + out := new(DetectionStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/anomaly/v1alpha1/zz_generated.managed.go b/apis/anomaly/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..7f5f63b --- /dev/null +++ b/apis/anomaly/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Detection. +func (mg *Detection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Detection. +func (mg *Detection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Detection. +func (mg *Detection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Detection. +func (mg *Detection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Detection. +func (mg *Detection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Detection. +func (mg *Detection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Detection. +func (mg *Detection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Detection. +func (mg *Detection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Detection. 
+func (mg *Detection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Detection. +func (mg *Detection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Detection. +func (mg *Detection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Detection. +func (mg *Detection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/anomaly/v1alpha1/zz_generated.managedlist.go b/apis/anomaly/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..da4b6d2 --- /dev/null +++ b/apis/anomaly/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DetectionList. +func (l *DetectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/anomaly/v1alpha1/zz_groupversion_info.go b/apis/anomaly/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..b9054c7 --- /dev/null +++ b/apis/anomaly/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=anomaly.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "anomaly.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/audit/v1alpha1/zz_config_terraformed.go b/apis/audit/v1alpha1/zz_config_terraformed.go new file mode 100755 index 0000000..1f37197 --- /dev/null +++ b/apis/audit/v1alpha1/zz_config_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Config +func (mg *Config) GetTerraformResourceType() string { + return "opensearch_audit_config" +} + +// GetConnectionDetailsMapping for this Config +func (tr *Config) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Config +func (tr *Config) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Config +func (tr *Config) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Config +func (tr *Config) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Config +func (tr *Config) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Config +func (tr *Config) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Config +func (tr *Config) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Config +func (tr *Config) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Config using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Config) LateInitialize(attrs []byte) (bool, error) { + params := &ConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Config) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/audit/v1alpha1/zz_config_types.go b/apis/audit/v1alpha1/zz_config_types.go new file mode 100755 index 0000000..41e4aa3 --- /dev/null +++ b/apis/audit/v1alpha1/zz_config_types.go @@ -0,0 +1,389 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuditInitParameters struct { + + // (Set of String) + // +listType=set + DisabledRestCategories []*string `json:"disabledRestCategories,omitempty" tf:"disabled_rest_categories,omitempty"` + + // (Set of String) + // +listType=set + DisabledTransportCategories []*string `json:"disabledTransportCategories,omitempty" tf:"disabled_transport_categories,omitempty"` + + // (Boolean) + EnableRest *bool `json:"enableRest,omitempty" tf:"enable_rest,omitempty"` + + // (Boolean) + EnableTransport *bool `json:"enableTransport,omitempty" tf:"enable_transport,omitempty"` + + // (Boolean) + ExcludeSensitiveHeaders *bool `json:"excludeSensitiveHeaders,omitempty" tf:"exclude_sensitive_headers,omitempty"` + + // (Set of String) + // +listType=set + IgnoreRequests []*string `json:"ignoreRequests,omitempty" tf:"ignore_requests,omitempty"` + + // (Set of String) + // +listType=set + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` + + // (Boolean) + LogRequestBody *bool `json:"logRequestBody,omitempty" tf:"log_request_body,omitempty"` + + // (Boolean) + ResolveBulkRequests *bool `json:"resolveBulkRequests,omitempty" tf:"resolve_bulk_requests,omitempty"` + + // (Boolean) + ResolveIndices *bool `json:"resolveIndices,omitempty" tf:"resolve_indices,omitempty"` +} + +type AuditObservation struct { + + // (Set of String) + // +listType=set + DisabledRestCategories []*string `json:"disabledRestCategories,omitempty" tf:"disabled_rest_categories,omitempty"` + + // (Set of String) + // +listType=set + DisabledTransportCategories []*string `json:"disabledTransportCategories,omitempty" tf:"disabled_transport_categories,omitempty"` + + // (Boolean) + EnableRest *bool `json:"enableRest,omitempty" tf:"enable_rest,omitempty"` + + // (Boolean) + EnableTransport *bool 
`json:"enableTransport,omitempty" tf:"enable_transport,omitempty"` + + // (Boolean) + ExcludeSensitiveHeaders *bool `json:"excludeSensitiveHeaders,omitempty" tf:"exclude_sensitive_headers,omitempty"` + + // (Set of String) + // +listType=set + IgnoreRequests []*string `json:"ignoreRequests,omitempty" tf:"ignore_requests,omitempty"` + + // (Set of String) + // +listType=set + IgnoreUsers []*string `json:"ignoreUsers,omitempty" tf:"ignore_users,omitempty"` + + // (Boolean) + LogRequestBody *bool `json:"logRequestBody,omitempty" tf:"log_request_body,omitempty"` + + // (Boolean) + ResolveBulkRequests *bool `json:"resolveBulkRequests,omitempty" tf:"resolve_bulk_requests,omitempty"` + + // (Boolean) + ResolveIndices *bool `json:"resolveIndices,omitempty" tf:"resolve_indices,omitempty"` +} + +type AuditParameters struct { + + // (Set of String) + // +kubebuilder:validation:Optional + // +listType=set + DisabledRestCategories []*string `json:"disabledRestCategories,omitempty" tf:"disabled_rest_categories,omitempty"` + + // (Set of String) + // +kubebuilder:validation:Optional + // +listType=set + DisabledTransportCategories []*string `json:"disabledTransportCategories,omitempty" tf:"disabled_transport_categories,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + EnableRest *bool `json:"enableRest,omitempty" tf:"enable_rest,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + EnableTransport *bool `json:"enableTransport,omitempty" tf:"enable_transport,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + ExcludeSensitiveHeaders *bool `json:"excludeSensitiveHeaders,omitempty" tf:"exclude_sensitive_headers,omitempty"` + + // (Set of String) + // +kubebuilder:validation:Optional + // +listType=set + IgnoreRequests []*string `json:"ignoreRequests,omitempty" tf:"ignore_requests,omitempty"` + + // (Set of String) + // +kubebuilder:validation:Optional + // +listType=set + IgnoreUsers []*string `json:"ignoreUsers,omitempty" 
tf:"ignore_users,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + LogRequestBody *bool `json:"logRequestBody,omitempty" tf:"log_request_body,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + ResolveBulkRequests *bool `json:"resolveBulkRequests,omitempty" tf:"resolve_bulk_requests,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + ResolveIndices *bool `json:"resolveIndices,omitempty" tf:"resolve_indices,omitempty"` +} + +type ComplianceInitParameters struct { + + // (Boolean) + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // (Boolean) + ExternalConfig *bool `json:"externalConfig,omitempty" tf:"external_config,omitempty"` + + // (Boolean) + InternalConfig *bool `json:"internalConfig,omitempty" tf:"internal_config,omitempty"` + + // (Set of String) + // +listType=set + ReadIgnoreUsers []*string `json:"readIgnoreUsers,omitempty" tf:"read_ignore_users,omitempty"` + + // (Boolean) + ReadMetadataOnly *bool `json:"readMetadataOnly,omitempty" tf:"read_metadata_only,omitempty"` + + // (Block Set) (see below for nested schema) + ReadWatchedField []ReadWatchedFieldInitParameters `json:"readWatchedField,omitempty" tf:"read_watched_field,omitempty"` + + // (Set of String) + // +listType=set + WriteIgnoreUsers []*string `json:"writeIgnoreUsers,omitempty" tf:"write_ignore_users,omitempty"` + + // (Boolean) + WriteLogDiffs *bool `json:"writeLogDiffs,omitempty" tf:"write_log_diffs,omitempty"` + + // (Boolean) + WriteMetadataOnly *bool `json:"writeMetadataOnly,omitempty" tf:"write_metadata_only,omitempty"` + + // (Set of String) + // +listType=set + WriteWatchedIndices []*string `json:"writeWatchedIndices,omitempty" tf:"write_watched_indices,omitempty"` +} + +type ComplianceObservation struct { + + // (Boolean) + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // (Boolean) + ExternalConfig *bool `json:"externalConfig,omitempty" tf:"external_config,omitempty"` + + // (Boolean) + 
InternalConfig *bool `json:"internalConfig,omitempty" tf:"internal_config,omitempty"` + + // (Set of String) + // +listType=set + ReadIgnoreUsers []*string `json:"readIgnoreUsers,omitempty" tf:"read_ignore_users,omitempty"` + + // (Boolean) + ReadMetadataOnly *bool `json:"readMetadataOnly,omitempty" tf:"read_metadata_only,omitempty"` + + // (Block Set) (see below for nested schema) + ReadWatchedField []ReadWatchedFieldObservation `json:"readWatchedField,omitempty" tf:"read_watched_field,omitempty"` + + // (Set of String) + // +listType=set + WriteIgnoreUsers []*string `json:"writeIgnoreUsers,omitempty" tf:"write_ignore_users,omitempty"` + + // (Boolean) + WriteLogDiffs *bool `json:"writeLogDiffs,omitempty" tf:"write_log_diffs,omitempty"` + + // (Boolean) + WriteMetadataOnly *bool `json:"writeMetadataOnly,omitempty" tf:"write_metadata_only,omitempty"` + + // (Set of String) + // +listType=set + WriteWatchedIndices []*string `json:"writeWatchedIndices,omitempty" tf:"write_watched_indices,omitempty"` +} + +type ComplianceParameters struct { + + // (Boolean) + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + ExternalConfig *bool `json:"externalConfig,omitempty" tf:"external_config,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + InternalConfig *bool `json:"internalConfig,omitempty" tf:"internal_config,omitempty"` + + // (Set of String) + // +kubebuilder:validation:Optional + // +listType=set + ReadIgnoreUsers []*string `json:"readIgnoreUsers,omitempty" tf:"read_ignore_users,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + ReadMetadataOnly *bool `json:"readMetadataOnly,omitempty" tf:"read_metadata_only,omitempty"` + + // (Block Set) (see below for nested schema) + // +kubebuilder:validation:Optional + ReadWatchedField []ReadWatchedFieldParameters `json:"readWatchedField,omitempty" tf:"read_watched_field,omitempty"` + + // 
(Set of String) + // +kubebuilder:validation:Optional + // +listType=set + WriteIgnoreUsers []*string `json:"writeIgnoreUsers,omitempty" tf:"write_ignore_users,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + WriteLogDiffs *bool `json:"writeLogDiffs,omitempty" tf:"write_log_diffs,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + WriteMetadataOnly *bool `json:"writeMetadataOnly,omitempty" tf:"write_metadata_only,omitempty"` + + // (Set of String) + // +kubebuilder:validation:Optional + // +listType=set + WriteWatchedIndices []*string `json:"writeWatchedIndices,omitempty" tf:"write_watched_indices,omitempty"` +} + +type ConfigInitParameters struct { + + // (Block Set, Max: 1) (see below for nested schema) + Audit []AuditInitParameters `json:"audit,omitempty" tf:"audit,omitempty"` + + // (Block Set, Max: 1) (see below for nested schema) + Compliance []ComplianceInitParameters `json:"compliance,omitempty" tf:"compliance,omitempty"` + + // (Boolean) + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ConfigObservation struct { + + // (Block Set, Max: 1) (see below for nested schema) + Audit []AuditObservation `json:"audit,omitempty" tf:"audit,omitempty"` + + // (Block Set, Max: 1) (see below for nested schema) + Compliance []ComplianceObservation `json:"compliance,omitempty" tf:"compliance,omitempty"` + + // (Boolean) + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // (String) The ID of this resource. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type ConfigParameters struct { + + // (Block Set, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Audit []AuditParameters `json:"audit,omitempty" tf:"audit,omitempty"` + + // (Block Set, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Compliance []ComplianceParameters `json:"compliance,omitempty" tf:"compliance,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ReadWatchedFieldInitParameters struct { + + // (Set of String) + // +listType=set + Fields []*string `json:"fields,omitempty" tf:"fields,omitempty"` + + // (String) + Index *string `json:"index,omitempty" tf:"index,omitempty"` +} + +type ReadWatchedFieldObservation struct { + + // (Set of String) + // +listType=set + Fields []*string `json:"fields,omitempty" tf:"fields,omitempty"` + + // (String) + Index *string `json:"index,omitempty" tf:"index,omitempty"` +} + +type ReadWatchedFieldParameters struct { + + // (Set of String) + // +kubebuilder:validation:Optional + // +listType=set + Fields []*string `json:"fields" tf:"fields,omitempty"` + + // (String) + // +kubebuilder:validation:Optional + Index *string `json:"index" tf:"index,omitempty"` +} + +// ConfigSpec defines the desired state of Config +type ConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigInitParameters `json:"initProvider,omitempty"` +} + +// ConfigStatus defines the observed state of Config. +type ConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Config is the Schema for the Configs API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Config struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.enabled) || (has(self.initProvider) && has(self.initProvider.enabled))",message="spec.forProvider.enabled is a required parameter" + Spec ConfigSpec `json:"spec"` + Status ConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigList contains a list of Configs +type ConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Config `json:"items"` +} + +// Repository type metadata. 
+var ( + Config_Kind = "Config" + Config_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Config_Kind}.String() + Config_KindAPIVersion = Config_Kind + "." + CRDGroupVersion.String() + Config_GroupVersionKind = CRDGroupVersion.WithKind(Config_Kind) +) + +func init() { + SchemeBuilder.Register(&Config{}, &ConfigList{}) +} diff --git a/apis/audit/v1alpha1/zz_generated.conversion_hubs.go b/apis/audit/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..8f50dc4 --- /dev/null +++ b/apis/audit/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Config) Hub() {} diff --git a/apis/audit/v1alpha1/zz_generated.deepcopy.go b/apis/audit/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..38818d9 --- /dev/null +++ b/apis/audit/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,825 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuditInitParameters) DeepCopyInto(out *AuditInitParameters) { + *out = *in + if in.DisabledRestCategories != nil { + in, out := &in.DisabledRestCategories, &out.DisabledRestCategories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledTransportCategories != nil { + in, out := &in.DisabledTransportCategories, &out.DisabledTransportCategories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableRest != nil { + in, out := &in.EnableRest, &out.EnableRest + *out = new(bool) + **out = **in + } + if in.EnableTransport != nil { + in, out := &in.EnableTransport, &out.EnableTransport + *out = new(bool) + **out = **in + } + if in.ExcludeSensitiveHeaders != nil { + in, out := &in.ExcludeSensitiveHeaders, &out.ExcludeSensitiveHeaders + *out = new(bool) + **out = **in + } + if in.IgnoreRequests != nil { + in, out := &in.IgnoreRequests, &out.IgnoreRequests + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IgnoreUsers != nil { + in, out := &in.IgnoreUsers, &out.IgnoreUsers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogRequestBody != nil { + in, out := &in.LogRequestBody, &out.LogRequestBody + *out = new(bool) + **out = **in + } + if in.ResolveBulkRequests != nil { + in, out := &in.ResolveBulkRequests, &out.ResolveBulkRequests + *out = new(bool) + **out = **in + } + if in.ResolveIndices != nil { + in, out := &in.ResolveIndices, &out.ResolveIndices + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new AuditInitParameters. +func (in *AuditInitParameters) DeepCopy() *AuditInitParameters { + if in == nil { + return nil + } + out := new(AuditInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditObservation) DeepCopyInto(out *AuditObservation) { + *out = *in + if in.DisabledRestCategories != nil { + in, out := &in.DisabledRestCategories, &out.DisabledRestCategories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledTransportCategories != nil { + in, out := &in.DisabledTransportCategories, &out.DisabledTransportCategories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableRest != nil { + in, out := &in.EnableRest, &out.EnableRest + *out = new(bool) + **out = **in + } + if in.EnableTransport != nil { + in, out := &in.EnableTransport, &out.EnableTransport + *out = new(bool) + **out = **in + } + if in.ExcludeSensitiveHeaders != nil { + in, out := &in.ExcludeSensitiveHeaders, &out.ExcludeSensitiveHeaders + *out = new(bool) + **out = **in + } + if in.IgnoreRequests != nil { + in, out := &in.IgnoreRequests, &out.IgnoreRequests + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IgnoreUsers != nil { + in, out := &in.IgnoreUsers, &out.IgnoreUsers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogRequestBody != nil { + in, out := &in.LogRequestBody, &out.LogRequestBody + *out = new(bool) + **out = **in + } + if in.ResolveBulkRequests != nil { + in, 
out := &in.ResolveBulkRequests, &out.ResolveBulkRequests + *out = new(bool) + **out = **in + } + if in.ResolveIndices != nil { + in, out := &in.ResolveIndices, &out.ResolveIndices + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditObservation. +func (in *AuditObservation) DeepCopy() *AuditObservation { + if in == nil { + return nil + } + out := new(AuditObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditParameters) DeepCopyInto(out *AuditParameters) { + *out = *in + if in.DisabledRestCategories != nil { + in, out := &in.DisabledRestCategories, &out.DisabledRestCategories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledTransportCategories != nil { + in, out := &in.DisabledTransportCategories, &out.DisabledTransportCategories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableRest != nil { + in, out := &in.EnableRest, &out.EnableRest + *out = new(bool) + **out = **in + } + if in.EnableTransport != nil { + in, out := &in.EnableTransport, &out.EnableTransport + *out = new(bool) + **out = **in + } + if in.ExcludeSensitiveHeaders != nil { + in, out := &in.ExcludeSensitiveHeaders, &out.ExcludeSensitiveHeaders + *out = new(bool) + **out = **in + } + if in.IgnoreRequests != nil { + in, out := &in.IgnoreRequests, &out.IgnoreRequests + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IgnoreUsers != nil { + in, out := &in.IgnoreUsers, &out.IgnoreUsers + *out = make([]*string, len(*in)) + 
for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogRequestBody != nil { + in, out := &in.LogRequestBody, &out.LogRequestBody + *out = new(bool) + **out = **in + } + if in.ResolveBulkRequests != nil { + in, out := &in.ResolveBulkRequests, &out.ResolveBulkRequests + *out = new(bool) + **out = **in + } + if in.ResolveIndices != nil { + in, out := &in.ResolveIndices, &out.ResolveIndices + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditParameters. +func (in *AuditParameters) DeepCopy() *AuditParameters { + if in == nil { + return nil + } + out := new(AuditParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComplianceInitParameters) DeepCopyInto(out *ComplianceInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExternalConfig != nil { + in, out := &in.ExternalConfig, &out.ExternalConfig + *out = new(bool) + **out = **in + } + if in.InternalConfig != nil { + in, out := &in.InternalConfig, &out.InternalConfig + *out = new(bool) + **out = **in + } + if in.ReadIgnoreUsers != nil { + in, out := &in.ReadIgnoreUsers, &out.ReadIgnoreUsers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReadMetadataOnly != nil { + in, out := &in.ReadMetadataOnly, &out.ReadMetadataOnly + *out = new(bool) + **out = **in + } + if in.ReadWatchedField != nil { + in, out := &in.ReadWatchedField, &out.ReadWatchedField + *out = make([]ReadWatchedFieldInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WriteIgnoreUsers != nil { + in, out := 
&in.WriteIgnoreUsers, &out.WriteIgnoreUsers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteLogDiffs != nil { + in, out := &in.WriteLogDiffs, &out.WriteLogDiffs + *out = new(bool) + **out = **in + } + if in.WriteMetadataOnly != nil { + in, out := &in.WriteMetadataOnly, &out.WriteMetadataOnly + *out = new(bool) + **out = **in + } + if in.WriteWatchedIndices != nil { + in, out := &in.WriteWatchedIndices, &out.WriteWatchedIndices + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComplianceInitParameters. +func (in *ComplianceInitParameters) DeepCopy() *ComplianceInitParameters { + if in == nil { + return nil + } + out := new(ComplianceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComplianceObservation) DeepCopyInto(out *ComplianceObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExternalConfig != nil { + in, out := &in.ExternalConfig, &out.ExternalConfig + *out = new(bool) + **out = **in + } + if in.InternalConfig != nil { + in, out := &in.InternalConfig, &out.InternalConfig + *out = new(bool) + **out = **in + } + if in.ReadIgnoreUsers != nil { + in, out := &in.ReadIgnoreUsers, &out.ReadIgnoreUsers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReadMetadataOnly != nil { + in, out := &in.ReadMetadataOnly, &out.ReadMetadataOnly + *out = new(bool) + **out = **in + } + if in.ReadWatchedField != nil { + in, out := &in.ReadWatchedField, &out.ReadWatchedField + *out = make([]ReadWatchedFieldObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WriteIgnoreUsers != nil { + in, out := &in.WriteIgnoreUsers, &out.WriteIgnoreUsers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteLogDiffs != nil { + in, out := &in.WriteLogDiffs, &out.WriteLogDiffs + *out = new(bool) + **out = **in + } + if in.WriteMetadataOnly != nil { + in, out := &in.WriteMetadataOnly, &out.WriteMetadataOnly + *out = new(bool) + **out = **in + } + if in.WriteWatchedIndices != nil { + in, out := &in.WriteWatchedIndices, &out.WriteWatchedIndices + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComplianceObservation. 
+func (in *ComplianceObservation) DeepCopy() *ComplianceObservation { + if in == nil { + return nil + } + out := new(ComplianceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComplianceParameters) DeepCopyInto(out *ComplianceParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExternalConfig != nil { + in, out := &in.ExternalConfig, &out.ExternalConfig + *out = new(bool) + **out = **in + } + if in.InternalConfig != nil { + in, out := &in.InternalConfig, &out.InternalConfig + *out = new(bool) + **out = **in + } + if in.ReadIgnoreUsers != nil { + in, out := &in.ReadIgnoreUsers, &out.ReadIgnoreUsers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReadMetadataOnly != nil { + in, out := &in.ReadMetadataOnly, &out.ReadMetadataOnly + *out = new(bool) + **out = **in + } + if in.ReadWatchedField != nil { + in, out := &in.ReadWatchedField, &out.ReadWatchedField + *out = make([]ReadWatchedFieldParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WriteIgnoreUsers != nil { + in, out := &in.WriteIgnoreUsers, &out.WriteIgnoreUsers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteLogDiffs != nil { + in, out := &in.WriteLogDiffs, &out.WriteLogDiffs + *out = new(bool) + **out = **in + } + if in.WriteMetadataOnly != nil { + in, out := &in.WriteMetadataOnly, &out.WriteMetadataOnly + *out = new(bool) + **out = **in + } + if in.WriteWatchedIndices != nil { + in, out := &in.WriteWatchedIndices, &out.WriteWatchedIndices + *out = make([]*string, len(*in)) + for i := range *in { + if 
(*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComplianceParameters. +func (in *ComplianceParameters) DeepCopy() *ComplianceParameters { + if in == nil { + return nil + } + out := new(ComplianceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigInitParameters) DeepCopyInto(out *ConfigInitParameters) { + *out = *in + if in.Audit != nil { + in, out := &in.Audit, &out.Audit + *out = make([]AuditInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Compliance != nil { + in, out := &in.Compliance, &out.Compliance + *out = make([]ComplianceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigInitParameters. 
+func (in *ConfigInitParameters) DeepCopy() *ConfigInitParameters { + if in == nil { + return nil + } + out := new(ConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. +func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigObservation) DeepCopyInto(out *ConfigObservation) { + *out = *in + if in.Audit != nil { + in, out := &in.Audit, &out.Audit + *out = make([]AuditObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Compliance != nil { + in, out := &in.Compliance, &out.Compliance + *out = make([]ComplianceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigObservation. 
+func (in *ConfigObservation) DeepCopy() *ConfigObservation { + if in == nil { + return nil + } + out := new(ConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigParameters) DeepCopyInto(out *ConfigParameters) { + *out = *in + if in.Audit != nil { + in, out := &in.Audit, &out.Audit + *out = make([]AuditParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Compliance != nil { + in, out := &in.Compliance, &out.Compliance + *out = make([]ComplianceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigParameters. +func (in *ConfigParameters) DeepCopy() *ConfigParameters { + if in == nil { + return nil + } + out := new(ConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. +func (in *ConfigSpec) DeepCopy() *ConfigSpec { + if in == nil { + return nil + } + out := new(ConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. +func (in *ConfigStatus) DeepCopy() *ConfigStatus { + if in == nil { + return nil + } + out := new(ConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadWatchedFieldInitParameters) DeepCopyInto(out *ReadWatchedFieldInitParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadWatchedFieldInitParameters. +func (in *ReadWatchedFieldInitParameters) DeepCopy() *ReadWatchedFieldInitParameters { + if in == nil { + return nil + } + out := new(ReadWatchedFieldInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadWatchedFieldObservation) DeepCopyInto(out *ReadWatchedFieldObservation) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadWatchedFieldObservation. 
+func (in *ReadWatchedFieldObservation) DeepCopy() *ReadWatchedFieldObservation { + if in == nil { + return nil + } + out := new(ReadWatchedFieldObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadWatchedFieldParameters) DeepCopyInto(out *ReadWatchedFieldParameters) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadWatchedFieldParameters. +func (in *ReadWatchedFieldParameters) DeepCopy() *ReadWatchedFieldParameters { + if in == nil { + return nil + } + out := new(ReadWatchedFieldParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/audit/v1alpha1/zz_generated.managed.go b/apis/audit/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..f9ed085 --- /dev/null +++ b/apis/audit/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Config. +func (mg *Config) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Config. +func (mg *Config) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Config. +func (mg *Config) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Config. 
+func (mg *Config) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Config. +func (mg *Config) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Config. +func (mg *Config) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Config. +func (mg *Config) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Config. +func (mg *Config) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Config. +func (mg *Config) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Config. +func (mg *Config) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Config. +func (mg *Config) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Config. +func (mg *Config) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/audit/v1alpha1/zz_generated.managedlist.go b/apis/audit/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..1adba42 --- /dev/null +++ b/apis/audit/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConfigList. 
+func (l *ConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/audit/v1alpha1/zz_groupversion_info.go b/apis/audit/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..a6344fb --- /dev/null +++ b/apis/audit/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=audit.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "audit.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/channel/v1alpha1/zz_configuration_terraformed.go b/apis/channel/v1alpha1/zz_configuration_terraformed.go new file mode 100755 index 0000000..c9cbade --- /dev/null +++ b/apis/channel/v1alpha1/zz_configuration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Configuration +func (mg *Configuration) GetTerraformResourceType() string { + return "opensearch_channel_configuration" +} + +// GetConnectionDetailsMapping for this Configuration +func (tr *Configuration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Configuration +func (tr *Configuration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Configuration +func (tr *Configuration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Configuration +func (tr *Configuration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Configuration +func (tr *Configuration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Configuration +func (tr *Configuration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Configuration +func (tr *Configuration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + 
return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Configuration
+func (tr *Configuration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Configuration using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Configuration) LateInitialize(attrs []byte) (bool, error) {
+	params := &ConfigurationParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Configuration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/channel/v1alpha1/zz_configuration_types.go b/apis/channel/v1alpha1/zz_configuration_types.go new file mode 100755 index 0000000..75eca51 --- /dev/null +++ b/apis/channel/v1alpha1/zz_configuration_types.go @@ -0,0 +1,97 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationInitParameters struct { + + // The channel configuration document + Body *string `json:"body,omitempty" tf:"body,omitempty"` +} + +type ConfigurationObservation struct { + + // The channel configuration document + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type ConfigurationParameters struct { + + // The channel configuration document + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` +} + +// ConfigurationSpec defines the desired state of Configuration +type ConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// ConfigurationStatus defines the observed state of Configuration. +type ConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Configuration is the Schema for the Configurations API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Configuration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + Spec ConfigurationSpec `json:"spec"` + Status ConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigurationList contains a list of Configurations +type ConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Configuration `json:"items"` +} + +// Repository type metadata. 
+var ( + Configuration_Kind = "Configuration" + Configuration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Configuration_Kind}.String() + Configuration_KindAPIVersion = Configuration_Kind + "." + CRDGroupVersion.String() + Configuration_GroupVersionKind = CRDGroupVersion.WithKind(Configuration_Kind) +) + +func init() { + SchemeBuilder.Register(&Configuration{}, &ConfigurationList{}) +} diff --git a/apis/channel/v1alpha1/zz_generated.conversion_hubs.go b/apis/channel/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..68dc10d --- /dev/null +++ b/apis/channel/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Configuration) Hub() {} diff --git a/apis/channel/v1alpha1/zz_generated.deepcopy.go b/apis/channel/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..9da6091 --- /dev/null +++ b/apis/channel/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,168 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Configuration) DeepCopyInto(out *Configuration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Configuration. 
+func (in *Configuration) DeepCopy() *Configuration { + if in == nil { + return nil + } + out := new(Configuration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Configuration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationList) DeepCopyInto(out *ConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Configuration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationList. +func (in *ConfigurationList) DeepCopy() *ConfigurationList { + if in == nil { + return nil + } + out := new(ConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSpec) DeepCopyInto(out *ConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSpec. 
+func (in *ConfigurationSpec) DeepCopy() *ConfigurationSpec { + if in == nil { + return nil + } + out := new(ConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationStatus) DeepCopyInto(out *ConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationStatus. +func (in *ConfigurationStatus) DeepCopy() *ConfigurationStatus { + if in == nil { + return nil + } + out := new(ConfigurationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/channel/v1alpha1/zz_generated.managed.go b/apis/channel/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..71e4c81 --- /dev/null +++ b/apis/channel/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Configuration. +func (mg *Configuration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Configuration. +func (mg *Configuration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Configuration. +func (mg *Configuration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Configuration. +func (mg *Configuration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Configuration. 
+func (mg *Configuration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Configuration. +func (mg *Configuration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Configuration. +func (mg *Configuration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Configuration. +func (mg *Configuration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Configuration. +func (mg *Configuration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Configuration. +func (mg *Configuration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Configuration. +func (mg *Configuration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Configuration. +func (mg *Configuration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/channel/v1alpha1/zz_generated.managedlist.go b/apis/channel/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..44fc60f --- /dev/null +++ b/apis/channel/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConfigurationList. 
+func (l *ConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/channel/v1alpha1/zz_groupversion_info.go b/apis/channel/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..d24f99b --- /dev/null +++ b/apis/channel/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=channel.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "channel.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cluster/v1alpha1/zz_generated.conversion_hubs.go b/apis/cluster/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..b89b287 --- /dev/null +++ b/apis/cluster/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. 
+func (tr *Settings) Hub() {} diff --git a/apis/cluster/v1alpha1/zz_generated.deepcopy.go b/apis/cluster/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..a6eb9ea --- /dev/null +++ b/apis/cluster/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,738 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Settings) DeepCopyInto(out *Settings) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Settings. +func (in *Settings) DeepCopy() *Settings { + if in == nil { + return nil + } + out := new(Settings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Settings) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) { + *out = *in + if in.ActionAutoCreateIndex != nil { + in, out := &in.ActionAutoCreateIndex, &out.ActionAutoCreateIndex + *out = new(string) + **out = **in + } + if in.ActionDestructiveRequiresName != nil { + in, out := &in.ActionDestructiveRequiresName, &out.ActionDestructiveRequiresName + *out = new(bool) + **out = **in + } + if in.ClusterBlocksReadOnly != nil { + in, out := &in.ClusterBlocksReadOnly, &out.ClusterBlocksReadOnly + *out = new(bool) + **out = **in + } + if in.ClusterBlocksReadOnlyAllowDelete != nil { + in, out := &in.ClusterBlocksReadOnlyAllowDelete, &out.ClusterBlocksReadOnlyAllowDelete + *out = new(bool) + **out = **in + } + if in.ClusterIndicesCloseEnable != nil { + in, out := &in.ClusterIndicesCloseEnable, &out.ClusterIndicesCloseEnable + *out = new(bool) + **out = **in + } + if in.ClusterInfoUpdateInterval != nil { + in, out := &in.ClusterInfoUpdateInterval, &out.ClusterInfoUpdateInterval + *out = new(string) + **out = **in + } + if in.ClusterMaxShardsPerNode != nil { + in, out := &in.ClusterMaxShardsPerNode, &out.ClusterMaxShardsPerNode + *out = new(float64) + **out = **in + } + if in.ClusterMaxShardsPerNodeFrozen != nil { + in, out := &in.ClusterMaxShardsPerNodeFrozen, &out.ClusterMaxShardsPerNodeFrozen + *out = new(float64) + **out = **in + } + if in.ClusterNoMasterBlock != nil { + in, out := &in.ClusterNoMasterBlock, &out.ClusterNoMasterBlock + *out = new(string) + **out = **in + } + if in.ClusterPersistentTasksAllocationEnable != nil { + in, out := &in.ClusterPersistentTasksAllocationEnable, &out.ClusterPersistentTasksAllocationEnable + *out = new(string) + **out = **in + } + if in.ClusterPersistentTasksAllocationRecheckInterval != nil { + in, out := &in.ClusterPersistentTasksAllocationRecheckInterval, &out.ClusterPersistentTasksAllocationRecheckInterval + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationAllowRebalance != nil { + in, out := 
&in.ClusterRoutingAllocationAllowRebalance, &out.ClusterRoutingAllocationAllowRebalance + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationAwarenessAttributes != nil { + in, out := &in.ClusterRoutingAllocationAwarenessAttributes, &out.ClusterRoutingAllocationAwarenessAttributes + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationBalanceIndex != nil { + in, out := &in.ClusterRoutingAllocationBalanceIndex, &out.ClusterRoutingAllocationBalanceIndex + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationBalanceShard != nil { + in, out := &in.ClusterRoutingAllocationBalanceShard, &out.ClusterRoutingAllocationBalanceShard + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationBalanceThreshold != nil { + in, out := &in.ClusterRoutingAllocationBalanceThreshold, &out.ClusterRoutingAllocationBalanceThreshold + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationClusterConcurrentRebalance != nil { + in, out := &in.ClusterRoutingAllocationClusterConcurrentRebalance, &out.ClusterRoutingAllocationClusterConcurrentRebalance + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationDiskIncludeRelocations != nil { + in, out := &in.ClusterRoutingAllocationDiskIncludeRelocations, &out.ClusterRoutingAllocationDiskIncludeRelocations + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationDiskThresholdEnabled != nil { + in, out := &in.ClusterRoutingAllocationDiskThresholdEnabled, &out.ClusterRoutingAllocationDiskThresholdEnabled + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationDiskWatermarkHigh != nil { + in, out := &in.ClusterRoutingAllocationDiskWatermarkHigh, &out.ClusterRoutingAllocationDiskWatermarkHigh + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationDiskWatermarkLow != nil { + in, out := &in.ClusterRoutingAllocationDiskWatermarkLow, &out.ClusterRoutingAllocationDiskWatermarkLow + *out = new(string) + **out = **in 
+ } + if in.ClusterRoutingAllocationEnable != nil { + in, out := &in.ClusterRoutingAllocationEnable, &out.ClusterRoutingAllocationEnable + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries, &out.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries, &out.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentRecoveries, &out.ClusterRoutingAllocationNodeConcurrentRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeInitialPrimariesRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeInitialPrimariesRecoveries, &out.ClusterRoutingAllocationNodeInitialPrimariesRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationSameShardHost != nil { + in, out := &in.ClusterRoutingAllocationSameShardHost, &out.ClusterRoutingAllocationSameShardHost + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationTotalShardsPerNode != nil { + in, out := &in.ClusterRoutingAllocationTotalShardsPerNode, &out.ClusterRoutingAllocationTotalShardsPerNode + *out = new(float64) + **out = **in + } + if in.ClusterRoutingRebalanceEnable != nil { + in, out := &in.ClusterRoutingRebalanceEnable, &out.ClusterRoutingRebalanceEnable + *out = new(string) + **out = **in + } + if in.IndicesBreakerFielddataLimit != nil { + in, out := &in.IndicesBreakerFielddataLimit, &out.IndicesBreakerFielddataLimit + *out = new(string) + **out = **in + } + if in.IndicesBreakerFielddataOverhead != nil { + in, out := &in.IndicesBreakerFielddataOverhead, 
&out.IndicesBreakerFielddataOverhead + *out = new(float64) + **out = **in + } + if in.IndicesBreakerRequestLimit != nil { + in, out := &in.IndicesBreakerRequestLimit, &out.IndicesBreakerRequestLimit + *out = new(string) + **out = **in + } + if in.IndicesBreakerRequestOverhead != nil { + in, out := &in.IndicesBreakerRequestOverhead, &out.IndicesBreakerRequestOverhead + *out = new(float64) + **out = **in + } + if in.IndicesBreakerTotalLimit != nil { + in, out := &in.IndicesBreakerTotalLimit, &out.IndicesBreakerTotalLimit + *out = new(string) + **out = **in + } + if in.IndicesRecoveryMaxBytesPerSec != nil { + in, out := &in.IndicesRecoveryMaxBytesPerSec, &out.IndicesRecoveryMaxBytesPerSec + *out = new(string) + **out = **in + } + if in.NetworkBreakerInflightRequestsLimit != nil { + in, out := &in.NetworkBreakerInflightRequestsLimit, &out.NetworkBreakerInflightRequestsLimit + *out = new(string) + **out = **in + } + if in.NetworkBreakerInflightRequestsOverhead != nil { + in, out := &in.NetworkBreakerInflightRequestsOverhead, &out.NetworkBreakerInflightRequestsOverhead + *out = new(float64) + **out = **in + } + if in.ScriptMaxCompilationsRate != nil { + in, out := &in.ScriptMaxCompilationsRate, &out.ScriptMaxCompilationsRate + *out = new(string) + **out = **in + } + if in.SearchDefaultSearchTimeout != nil { + in, out := &in.SearchDefaultSearchTimeout, &out.SearchDefaultSearchTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters. +func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters { + if in == nil { + return nil + } + out := new(SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsList) DeepCopyInto(out *SettingsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Settings, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsList. +func (in *SettingsList) DeepCopy() *SettingsList { + if in == nil { + return nil + } + out := new(SettingsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SettingsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) { + *out = *in + if in.ActionAutoCreateIndex != nil { + in, out := &in.ActionAutoCreateIndex, &out.ActionAutoCreateIndex + *out = new(string) + **out = **in + } + if in.ActionDestructiveRequiresName != nil { + in, out := &in.ActionDestructiveRequiresName, &out.ActionDestructiveRequiresName + *out = new(bool) + **out = **in + } + if in.ClusterBlocksReadOnly != nil { + in, out := &in.ClusterBlocksReadOnly, &out.ClusterBlocksReadOnly + *out = new(bool) + **out = **in + } + if in.ClusterBlocksReadOnlyAllowDelete != nil { + in, out := &in.ClusterBlocksReadOnlyAllowDelete, &out.ClusterBlocksReadOnlyAllowDelete + *out = new(bool) + **out = **in + } + if in.ClusterIndicesCloseEnable != nil { + in, out := &in.ClusterIndicesCloseEnable, &out.ClusterIndicesCloseEnable + *out = new(bool) + **out = **in + } + if in.ClusterInfoUpdateInterval != nil { + in, out := &in.ClusterInfoUpdateInterval, &out.ClusterInfoUpdateInterval + *out = new(string) + **out = **in + } + if in.ClusterMaxShardsPerNode != nil { + 
in, out := &in.ClusterMaxShardsPerNode, &out.ClusterMaxShardsPerNode + *out = new(float64) + **out = **in + } + if in.ClusterMaxShardsPerNodeFrozen != nil { + in, out := &in.ClusterMaxShardsPerNodeFrozen, &out.ClusterMaxShardsPerNodeFrozen + *out = new(float64) + **out = **in + } + if in.ClusterNoMasterBlock != nil { + in, out := &in.ClusterNoMasterBlock, &out.ClusterNoMasterBlock + *out = new(string) + **out = **in + } + if in.ClusterPersistentTasksAllocationEnable != nil { + in, out := &in.ClusterPersistentTasksAllocationEnable, &out.ClusterPersistentTasksAllocationEnable + *out = new(string) + **out = **in + } + if in.ClusterPersistentTasksAllocationRecheckInterval != nil { + in, out := &in.ClusterPersistentTasksAllocationRecheckInterval, &out.ClusterPersistentTasksAllocationRecheckInterval + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationAllowRebalance != nil { + in, out := &in.ClusterRoutingAllocationAllowRebalance, &out.ClusterRoutingAllocationAllowRebalance + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationAwarenessAttributes != nil { + in, out := &in.ClusterRoutingAllocationAwarenessAttributes, &out.ClusterRoutingAllocationAwarenessAttributes + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationBalanceIndex != nil { + in, out := &in.ClusterRoutingAllocationBalanceIndex, &out.ClusterRoutingAllocationBalanceIndex + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationBalanceShard != nil { + in, out := &in.ClusterRoutingAllocationBalanceShard, &out.ClusterRoutingAllocationBalanceShard + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationBalanceThreshold != nil { + in, out := &in.ClusterRoutingAllocationBalanceThreshold, &out.ClusterRoutingAllocationBalanceThreshold + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationClusterConcurrentRebalance != nil { + in, out := &in.ClusterRoutingAllocationClusterConcurrentRebalance, 
&out.ClusterRoutingAllocationClusterConcurrentRebalance + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationDiskIncludeRelocations != nil { + in, out := &in.ClusterRoutingAllocationDiskIncludeRelocations, &out.ClusterRoutingAllocationDiskIncludeRelocations + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationDiskThresholdEnabled != nil { + in, out := &in.ClusterRoutingAllocationDiskThresholdEnabled, &out.ClusterRoutingAllocationDiskThresholdEnabled + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationDiskWatermarkHigh != nil { + in, out := &in.ClusterRoutingAllocationDiskWatermarkHigh, &out.ClusterRoutingAllocationDiskWatermarkHigh + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationDiskWatermarkLow != nil { + in, out := &in.ClusterRoutingAllocationDiskWatermarkLow, &out.ClusterRoutingAllocationDiskWatermarkLow + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationEnable != nil { + in, out := &in.ClusterRoutingAllocationEnable, &out.ClusterRoutingAllocationEnable + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries, &out.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries, &out.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentRecoveries, &out.ClusterRoutingAllocationNodeConcurrentRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeInitialPrimariesRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeInitialPrimariesRecoveries, 
&out.ClusterRoutingAllocationNodeInitialPrimariesRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationSameShardHost != nil { + in, out := &in.ClusterRoutingAllocationSameShardHost, &out.ClusterRoutingAllocationSameShardHost + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationTotalShardsPerNode != nil { + in, out := &in.ClusterRoutingAllocationTotalShardsPerNode, &out.ClusterRoutingAllocationTotalShardsPerNode + *out = new(float64) + **out = **in + } + if in.ClusterRoutingRebalanceEnable != nil { + in, out := &in.ClusterRoutingRebalanceEnable, &out.ClusterRoutingRebalanceEnable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndicesBreakerFielddataLimit != nil { + in, out := &in.IndicesBreakerFielddataLimit, &out.IndicesBreakerFielddataLimit + *out = new(string) + **out = **in + } + if in.IndicesBreakerFielddataOverhead != nil { + in, out := &in.IndicesBreakerFielddataOverhead, &out.IndicesBreakerFielddataOverhead + *out = new(float64) + **out = **in + } + if in.IndicesBreakerRequestLimit != nil { + in, out := &in.IndicesBreakerRequestLimit, &out.IndicesBreakerRequestLimit + *out = new(string) + **out = **in + } + if in.IndicesBreakerRequestOverhead != nil { + in, out := &in.IndicesBreakerRequestOverhead, &out.IndicesBreakerRequestOverhead + *out = new(float64) + **out = **in + } + if in.IndicesBreakerTotalLimit != nil { + in, out := &in.IndicesBreakerTotalLimit, &out.IndicesBreakerTotalLimit + *out = new(string) + **out = **in + } + if in.IndicesRecoveryMaxBytesPerSec != nil { + in, out := &in.IndicesRecoveryMaxBytesPerSec, &out.IndicesRecoveryMaxBytesPerSec + *out = new(string) + **out = **in + } + if in.NetworkBreakerInflightRequestsLimit != nil { + in, out := &in.NetworkBreakerInflightRequestsLimit, &out.NetworkBreakerInflightRequestsLimit + *out = new(string) + **out = **in + } + if in.NetworkBreakerInflightRequestsOverhead 
!= nil { + in, out := &in.NetworkBreakerInflightRequestsOverhead, &out.NetworkBreakerInflightRequestsOverhead + *out = new(float64) + **out = **in + } + if in.ScriptMaxCompilationsRate != nil { + in, out := &in.ScriptMaxCompilationsRate, &out.ScriptMaxCompilationsRate + *out = new(string) + **out = **in + } + if in.SearchDefaultSearchTimeout != nil { + in, out := &in.SearchDefaultSearchTimeout, &out.SearchDefaultSearchTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation. +func (in *SettingsObservation) DeepCopy() *SettingsObservation { + if in == nil { + return nil + } + out := new(SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) { + *out = *in + if in.ActionAutoCreateIndex != nil { + in, out := &in.ActionAutoCreateIndex, &out.ActionAutoCreateIndex + *out = new(string) + **out = **in + } + if in.ActionDestructiveRequiresName != nil { + in, out := &in.ActionDestructiveRequiresName, &out.ActionDestructiveRequiresName + *out = new(bool) + **out = **in + } + if in.ClusterBlocksReadOnly != nil { + in, out := &in.ClusterBlocksReadOnly, &out.ClusterBlocksReadOnly + *out = new(bool) + **out = **in + } + if in.ClusterBlocksReadOnlyAllowDelete != nil { + in, out := &in.ClusterBlocksReadOnlyAllowDelete, &out.ClusterBlocksReadOnlyAllowDelete + *out = new(bool) + **out = **in + } + if in.ClusterIndicesCloseEnable != nil { + in, out := &in.ClusterIndicesCloseEnable, &out.ClusterIndicesCloseEnable + *out = new(bool) + **out = **in + } + if in.ClusterInfoUpdateInterval != nil { + in, out := &in.ClusterInfoUpdateInterval, &out.ClusterInfoUpdateInterval + *out = new(string) + **out = **in + } + if in.ClusterMaxShardsPerNode != nil { + in, out := &in.ClusterMaxShardsPerNode, 
&out.ClusterMaxShardsPerNode + *out = new(float64) + **out = **in + } + if in.ClusterMaxShardsPerNodeFrozen != nil { + in, out := &in.ClusterMaxShardsPerNodeFrozen, &out.ClusterMaxShardsPerNodeFrozen + *out = new(float64) + **out = **in + } + if in.ClusterNoMasterBlock != nil { + in, out := &in.ClusterNoMasterBlock, &out.ClusterNoMasterBlock + *out = new(string) + **out = **in + } + if in.ClusterPersistentTasksAllocationEnable != nil { + in, out := &in.ClusterPersistentTasksAllocationEnable, &out.ClusterPersistentTasksAllocationEnable + *out = new(string) + **out = **in + } + if in.ClusterPersistentTasksAllocationRecheckInterval != nil { + in, out := &in.ClusterPersistentTasksAllocationRecheckInterval, &out.ClusterPersistentTasksAllocationRecheckInterval + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationAllowRebalance != nil { + in, out := &in.ClusterRoutingAllocationAllowRebalance, &out.ClusterRoutingAllocationAllowRebalance + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationAwarenessAttributes != nil { + in, out := &in.ClusterRoutingAllocationAwarenessAttributes, &out.ClusterRoutingAllocationAwarenessAttributes + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationBalanceIndex != nil { + in, out := &in.ClusterRoutingAllocationBalanceIndex, &out.ClusterRoutingAllocationBalanceIndex + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationBalanceShard != nil { + in, out := &in.ClusterRoutingAllocationBalanceShard, &out.ClusterRoutingAllocationBalanceShard + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationBalanceThreshold != nil { + in, out := &in.ClusterRoutingAllocationBalanceThreshold, &out.ClusterRoutingAllocationBalanceThreshold + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationClusterConcurrentRebalance != nil { + in, out := &in.ClusterRoutingAllocationClusterConcurrentRebalance, &out.ClusterRoutingAllocationClusterConcurrentRebalance + 
*out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationDiskIncludeRelocations != nil { + in, out := &in.ClusterRoutingAllocationDiskIncludeRelocations, &out.ClusterRoutingAllocationDiskIncludeRelocations + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationDiskThresholdEnabled != nil { + in, out := &in.ClusterRoutingAllocationDiskThresholdEnabled, &out.ClusterRoutingAllocationDiskThresholdEnabled + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationDiskWatermarkHigh != nil { + in, out := &in.ClusterRoutingAllocationDiskWatermarkHigh, &out.ClusterRoutingAllocationDiskWatermarkHigh + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationDiskWatermarkLow != nil { + in, out := &in.ClusterRoutingAllocationDiskWatermarkLow, &out.ClusterRoutingAllocationDiskWatermarkLow + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationEnable != nil { + in, out := &in.ClusterRoutingAllocationEnable, &out.ClusterRoutingAllocationEnable + *out = new(string) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries, &out.ClusterRoutingAllocationNodeConcurrentIncomingRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries, &out.ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeConcurrentRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeConcurrentRecoveries, &out.ClusterRoutingAllocationNodeConcurrentRecoveries + *out = new(float64) + **out = **in + } + if in.ClusterRoutingAllocationNodeInitialPrimariesRecoveries != nil { + in, out := &in.ClusterRoutingAllocationNodeInitialPrimariesRecoveries, &out.ClusterRoutingAllocationNodeInitialPrimariesRecoveries + *out = 
new(float64) + **out = **in + } + if in.ClusterRoutingAllocationSameShardHost != nil { + in, out := &in.ClusterRoutingAllocationSameShardHost, &out.ClusterRoutingAllocationSameShardHost + *out = new(bool) + **out = **in + } + if in.ClusterRoutingAllocationTotalShardsPerNode != nil { + in, out := &in.ClusterRoutingAllocationTotalShardsPerNode, &out.ClusterRoutingAllocationTotalShardsPerNode + *out = new(float64) + **out = **in + } + if in.ClusterRoutingRebalanceEnable != nil { + in, out := &in.ClusterRoutingRebalanceEnable, &out.ClusterRoutingRebalanceEnable + *out = new(string) + **out = **in + } + if in.IndicesBreakerFielddataLimit != nil { + in, out := &in.IndicesBreakerFielddataLimit, &out.IndicesBreakerFielddataLimit + *out = new(string) + **out = **in + } + if in.IndicesBreakerFielddataOverhead != nil { + in, out := &in.IndicesBreakerFielddataOverhead, &out.IndicesBreakerFielddataOverhead + *out = new(float64) + **out = **in + } + if in.IndicesBreakerRequestLimit != nil { + in, out := &in.IndicesBreakerRequestLimit, &out.IndicesBreakerRequestLimit + *out = new(string) + **out = **in + } + if in.IndicesBreakerRequestOverhead != nil { + in, out := &in.IndicesBreakerRequestOverhead, &out.IndicesBreakerRequestOverhead + *out = new(float64) + **out = **in + } + if in.IndicesBreakerTotalLimit != nil { + in, out := &in.IndicesBreakerTotalLimit, &out.IndicesBreakerTotalLimit + *out = new(string) + **out = **in + } + if in.IndicesRecoveryMaxBytesPerSec != nil { + in, out := &in.IndicesRecoveryMaxBytesPerSec, &out.IndicesRecoveryMaxBytesPerSec + *out = new(string) + **out = **in + } + if in.NetworkBreakerInflightRequestsLimit != nil { + in, out := &in.NetworkBreakerInflightRequestsLimit, &out.NetworkBreakerInflightRequestsLimit + *out = new(string) + **out = **in + } + if in.NetworkBreakerInflightRequestsOverhead != nil { + in, out := &in.NetworkBreakerInflightRequestsOverhead, &out.NetworkBreakerInflightRequestsOverhead + *out = new(float64) + **out = **in + } + if 
in.ScriptMaxCompilationsRate != nil { + in, out := &in.ScriptMaxCompilationsRate, &out.ScriptMaxCompilationsRate + *out = new(string) + **out = **in + } + if in.SearchDefaultSearchTimeout != nil { + in, out := &in.SearchDefaultSearchTimeout, &out.SearchDefaultSearchTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters. +func (in *SettingsParameters) DeepCopy() *SettingsParameters { + if in == nil { + return nil + } + out := new(SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsSpec) DeepCopyInto(out *SettingsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsSpec. +func (in *SettingsSpec) DeepCopy() *SettingsSpec { + if in == nil { + return nil + } + out := new(SettingsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsStatus) DeepCopyInto(out *SettingsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsStatus. 
+func (in *SettingsStatus) DeepCopy() *SettingsStatus { + if in == nil { + return nil + } + out := new(SettingsStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cluster/v1alpha1/zz_generated.managed.go b/apis/cluster/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..bf972ad --- /dev/null +++ b/apis/cluster/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Settings. +func (mg *Settings) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Settings. +func (mg *Settings) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Settings. +func (mg *Settings) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Settings. +func (mg *Settings) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Settings. +func (mg *Settings) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Settings. +func (mg *Settings) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Settings. +func (mg *Settings) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Settings. +func (mg *Settings) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Settings. +func (mg *Settings) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Settings. 
+func (mg *Settings) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Settings. +func (mg *Settings) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Settings. +func (mg *Settings) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cluster/v1alpha1/zz_generated.managedlist.go b/apis/cluster/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..839221e --- /dev/null +++ b/apis/cluster/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this SettingsList. +func (l *SettingsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cluster/v1alpha1/zz_groupversion_info.go b/apis/cluster/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..2a3bdc0 --- /dev/null +++ b/apis/cluster/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cluster.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "cluster.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cluster/v1alpha1/zz_settings_terraformed.go b/apis/cluster/v1alpha1/zz_settings_terraformed.go new file mode 100755 index 0000000..75935ba --- /dev/null +++ b/apis/cluster/v1alpha1/zz_settings_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Settings +func (mg *Settings) GetTerraformResourceType() string { + return "opensearch_cluster_settings" +} + +// GetConnectionDetailsMapping for this Settings +func (tr *Settings) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Settings +func (tr *Settings) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Settings +func (tr *Settings) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Settings +func (tr 
*Settings) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Settings +func (tr *Settings) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Settings +func (tr *Settings) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Settings +func (tr *Settings) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Settings +func (tr *Settings) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Settings using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Settings) LateInitialize(attrs []byte) (bool, error) { + params := &SettingsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Settings) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cluster/v1alpha1/zz_settings_types.go b/apis/cluster/v1alpha1/zz_settings_types.go new file mode 100755 index 0000000..bc8aafe --- /dev/null +++ b/apis/cluster/v1alpha1/zz_settings_types.go @@ -0,0 +1,594 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SettingsInitParameters struct { + + // (String) Whether to automatically create an index if it doesn’t already exist and apply any configured index template + // Whether to automatically create an index if it doesn’t already exist and apply any configured index template + ActionAutoCreateIndex *string `json:"actionAutoCreateIndex,omitempty" tf:"action_auto_create_index,omitempty"` + + // (Boolean) When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + // When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + ActionDestructiveRequiresName *bool `json:"actionDestructiveRequiresName,omitempty" tf:"action_destructive_requires_name,omitempty"` + + // (Boolean) Make the whole cluster read only and metadata is not allowed to be modified + // Make the whole cluster read only and metadata is not allowed to be modified + ClusterBlocksReadOnly *bool `json:"clusterBlocksReadOnly,omitempty" tf:"cluster_blocks_read_only,omitempty"` + + // (Boolean) Make the whole cluster read only, but allows to delete indices to free up resources + // Make the whole cluster read only, but allows to delete indices to free up resources + ClusterBlocksReadOnlyAllowDelete *bool `json:"clusterBlocksReadOnlyAllowDelete,omitempty" tf:"cluster_blocks_read_only_allow_delete,omitempty"` + + // (Boolean) If false, you cannot close open indices + // If false, you cannot close open indices + ClusterIndicesCloseEnable *bool `json:"clusterIndicesCloseEnable,omitempty" tf:"cluster_indices_close_enable,omitempty"` + + // (String) A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + // A time 
string controlling how often OpenSearch should check on disk usage for each node in the cluster + ClusterInfoUpdateInterval *string `json:"clusterInfoUpdateInterval,omitempty" tf:"cluster_info_update_interval,omitempty"` + + // frozen data nodes; shards for closed indices do not count toward this limit + // The total number of primary and replica shards for the cluster, this number is multiplied by the number of non-frozen data nodes; shards for closed indices do not count toward this limit + ClusterMaxShardsPerNode *float64 `json:"clusterMaxShardsPerNode,omitempty" tf:"cluster_max_shards_per_node,omitempty"` + + // (Number) The total number of primary and replica frozen shards, for the cluster; Ssards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + // The total number of primary and replica frozen shards, for the cluster; Ssards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + ClusterMaxShardsPerNodeFrozen *float64 `json:"clusterMaxShardsPerNodeFrozen,omitempty" tf:"cluster_max_shards_per_node_frozen,omitempty"` + + // (String) Specifies which operations are rejected when there is no active master in a cluster (all, write) + // Specifies which operations are rejected when there is no active master in a cluster (all, write) + ClusterNoMasterBlock *string `json:"clusterNoMasterBlock,omitempty" tf:"cluster_no_master_block,omitempty"` + + // (String) Whether allocation for persistent tasks is active (all, none) + // Whether allocation for persistent tasks is active (all, none) + ClusterPersistentTasksAllocationEnable *string `json:"clusterPersistentTasksAllocationEnable,omitempty" tf:"cluster_persistent_tasks_allocation_enable,omitempty"` + + // (String) A time string controling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + // A time string controling how often assignment checks are performed to react 
to whether persistent tasks can be assigned to nodes + ClusterPersistentTasksAllocationRecheckInterval *string `json:"clusterPersistentTasksAllocationRecheckInterval,omitempty" tf:"cluster_persistent_tasks_allocation_recheck_interval,omitempty"` + + // (String) Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + // Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + ClusterRoutingAllocationAllowRebalance *string `json:"clusterRoutingAllocationAllowRebalance,omitempty" tf:"cluster_routing_allocation_allow_rebalance,omitempty"` + + // (String) Use custom node attributes to take hardware configuration into account when allocating shards + // Use custom node attributes to take hardware configuration into account when allocating shards + ClusterRoutingAllocationAwarenessAttributes *string `json:"clusterRoutingAllocationAwarenessAttributes,omitempty" tf:"cluster_routing_allocation_awareness_attributes,omitempty"` + + // (Number) Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + // Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + ClusterRoutingAllocationBalanceIndex *float64 `json:"clusterRoutingAllocationBalanceIndex,omitempty" tf:"cluster_routing_allocation_balance_index,omitempty"` + + // (Number) Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + // Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + ClusterRoutingAllocationBalanceShard *float64 `json:"clusterRoutingAllocationBalanceShard,omitempty" 
tf:"cluster_routing_allocation_balance_shard,omitempty"` + + // (Number) Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + // Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + ClusterRoutingAllocationBalanceThreshold *float64 `json:"clusterRoutingAllocationBalanceThreshold,omitempty" tf:"cluster_routing_allocation_balance_threshold,omitempty"` + + // (Number) How many concurrent shard rebalances are allowed cluster wide + // How many concurrent shard rebalances are allowed cluster wide + ClusterRoutingAllocationClusterConcurrentRebalance *float64 `json:"clusterRoutingAllocationClusterConcurrentRebalance,omitempty" tf:"cluster_routing_allocation_cluster_concurrent_rebalance,omitempty"` + + // (Boolean) Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + // Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + ClusterRoutingAllocationDiskIncludeRelocations *bool `json:"clusterRoutingAllocationDiskIncludeRelocations,omitempty" tf:"cluster_routing_allocation_disk_include_relocations,omitempty"` + + // (Boolean) Whether the disk allocation decider is active + // Whether the disk allocation decider is active + ClusterRoutingAllocationDiskThresholdEnabled *bool `json:"clusterRoutingAllocationDiskThresholdEnabled,omitempty" tf:"cluster_routing_allocation_disk_threshold_enabled,omitempty"` + + // (String) Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + // Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + ClusterRoutingAllocationDiskWatermarkHigh 
*string `json:"clusterRoutingAllocationDiskWatermarkHigh,omitempty" tf:"cluster_routing_allocation_disk_watermark_high,omitempty"` + + // (String) Allocator will not allocate shards to nodes that have more than this percentage disk used + // Allocator will not allocate shards to nodes that have more than this percentage disk used + ClusterRoutingAllocationDiskWatermarkLow *string `json:"clusterRoutingAllocationDiskWatermarkLow,omitempty" tf:"cluster_routing_allocation_disk_watermark_low,omitempty"` + + // (String) Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + // Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + ClusterRoutingAllocationEnable *string `json:"clusterRoutingAllocationEnable,omitempty" tf:"cluster_routing_allocation_enable,omitempty"` + + // (Number) How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + // How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + ClusterRoutingAllocationNodeConcurrentIncomingRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentIncomingRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_incoming_recoveries,omitempty"` + + // (Number) How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + // How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentOutgoingRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_outgoing_recoveries,omitempty"` + + // (Number) A shortcut to set both incoming and outgoing recoveries + // A shortcut to set both incoming and outgoing recoveries + 
ClusterRoutingAllocationNodeConcurrentRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_recoveries,omitempty"` + + // (Number) Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + // Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + ClusterRoutingAllocationNodeInitialPrimariesRecoveries *float64 `json:"clusterRoutingAllocationNodeInitialPrimariesRecoveries,omitempty" tf:"cluster_routing_allocation_node_initial_primaries_recoveries,omitempty"` + + // (Boolean) Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + // Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + ClusterRoutingAllocationSameShardHost *bool `json:"clusterRoutingAllocationSameShardHost,omitempty" tf:"cluster_routing_allocation_same_shard_host,omitempty"` + + // (Number) Maximum number of primary and replica shards allocated to each node + // Maximum number of primary and replica shards allocated to each node + ClusterRoutingAllocationTotalShardsPerNode *float64 `json:"clusterRoutingAllocationTotalShardsPerNode,omitempty" tf:"cluster_routing_allocation_total_shards_per_node,omitempty"` + + // (String) Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + // Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + ClusterRoutingRebalanceEnable *string `json:"clusterRoutingRebalanceEnable,omitempty" tf:"cluster_routing_rebalance_enable,omitempty"` + + // (String) The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + // The percentage of memory above which if loading a field into the field data cache would cause the 
cache to exceed this limit, an error is returned + IndicesBreakerFielddataLimit *string `json:"indicesBreakerFielddataLimit,omitempty" tf:"indices_breaker_fielddata_limit,omitempty"` + + // (Number) A constant that all field data estimations are multiplied by + // A constant that all field data estimations are multiplied by + IndicesBreakerFielddataOverhead *float64 `json:"indicesBreakerFielddataOverhead,omitempty" tf:"indices_breaker_fielddata_overhead,omitempty"` + + // request data structures (e.g. calculating aggregations) are prevented from exceeding + // The percentabge of memory above which per-request data structures (e.g. calculating aggregations) are prevented from exceeding + IndicesBreakerRequestLimit *string `json:"indicesBreakerRequestLimit,omitempty" tf:"indices_breaker_request_limit,omitempty"` + + // (Number) A constant that all request estimations are multiplied by + // A constant that all request estimations are multiplied by + IndicesBreakerRequestOverhead *float64 `json:"indicesBreakerRequestOverhead,omitempty" tf:"indices_breaker_request_overhead,omitempty"` + + // (String) The percentage of total amount of memory that can be used across all breakers + // The percentage of total amount of memory that can be used across all breakers + IndicesBreakerTotalLimit *string `json:"indicesBreakerTotalLimit,omitempty" tf:"indices_breaker_total_limit,omitempty"` + + // (String) Maximum total inbound and outbound recovery traffic for each node, in mb + // Maximum total inbound and outbound recovery traffic for each node, in mb + IndicesRecoveryMaxBytesPerSec *string `json:"indicesRecoveryMaxBytesPerSec,omitempty" tf:"indices_recovery_max_bytes_per_sec,omitempty"` + + // (String) The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + // The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + NetworkBreakerInflightRequestsLimit 
*string `json:"networkBreakerInflightRequestsLimit,omitempty" tf:"network_breaker_inflight_requests_limit,omitempty"` + + // (Number) A constant that all in flight requests estimations are multiplied by + // A constant that all in flight requests estimations are multiplied by + NetworkBreakerInflightRequestsOverhead *float64 `json:"networkBreakerInflightRequestsOverhead,omitempty" tf:"network_breaker_inflight_requests_overhead,omitempty"` + + // (String) Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + // Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + ScriptMaxCompilationsRate *string `json:"scriptMaxCompilationsRate,omitempty" tf:"script_max_compilations_rate,omitempty"` + + // wide default timeout for all search requests + // A time string setting a cluster-wide default timeout for all search requests + SearchDefaultSearchTimeout *string `json:"searchDefaultSearchTimeout,omitempty" tf:"search_default_search_timeout,omitempty"` +} + +type SettingsObservation struct { + + // (String) Whether to automatically create an index if it doesn’t already exist and apply any configured index template + // Whether to automatically create an index if it doesn’t already exist and apply any configured index template + ActionAutoCreateIndex *string `json:"actionAutoCreateIndex,omitempty" tf:"action_auto_create_index,omitempty"` + + // (Boolean) When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + // When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + ActionDestructiveRequiresName *bool `json:"actionDestructiveRequiresName,omitempty" tf:"action_destructive_requires_name,omitempty"` + + 
// (Boolean) Make the whole cluster read only and metadata is not allowed to be modified + // Make the whole cluster read only and metadata is not allowed to be modified + ClusterBlocksReadOnly *bool `json:"clusterBlocksReadOnly,omitempty" tf:"cluster_blocks_read_only,omitempty"` + + // (Boolean) Make the whole cluster read only, but allows to delete indices to free up resources + // Make the whole cluster read only, but allows to delete indices to free up resources + ClusterBlocksReadOnlyAllowDelete *bool `json:"clusterBlocksReadOnlyAllowDelete,omitempty" tf:"cluster_blocks_read_only_allow_delete,omitempty"` + + // (Boolean) If false, you cannot close open indices + // If false, you cannot close open indices + ClusterIndicesCloseEnable *bool `json:"clusterIndicesCloseEnable,omitempty" tf:"cluster_indices_close_enable,omitempty"` + + // (String) A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + // A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + ClusterInfoUpdateInterval *string `json:"clusterInfoUpdateInterval,omitempty" tf:"cluster_info_update_interval,omitempty"` + + // frozen data nodes; shards for closed indices do not count toward this limit + // The total number of primary and replica shards for the cluster, this number is multiplied by the number of non-frozen data nodes; shards for closed indices do not count toward this limit + ClusterMaxShardsPerNode *float64 `json:"clusterMaxShardsPerNode,omitempty" tf:"cluster_max_shards_per_node,omitempty"` + + // (Number) The total number of primary and replica frozen shards, for the cluster; Ssards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + // The total number of primary and replica frozen shards, for the cluster; Ssards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. 
+ ClusterMaxShardsPerNodeFrozen *float64 `json:"clusterMaxShardsPerNodeFrozen,omitempty" tf:"cluster_max_shards_per_node_frozen,omitempty"` + + // (String) Specifies which operations are rejected when there is no active master in a cluster (all, write) + // Specifies which operations are rejected when there is no active master in a cluster (all, write) + ClusterNoMasterBlock *string `json:"clusterNoMasterBlock,omitempty" tf:"cluster_no_master_block,omitempty"` + + // (String) Whether allocation for persistent tasks is active (all, none) + // Whether allocation for persistent tasks is active (all, none) + ClusterPersistentTasksAllocationEnable *string `json:"clusterPersistentTasksAllocationEnable,omitempty" tf:"cluster_persistent_tasks_allocation_enable,omitempty"` + + // (String) A time string controling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + // A time string controling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + ClusterPersistentTasksAllocationRecheckInterval *string `json:"clusterPersistentTasksAllocationRecheckInterval,omitempty" tf:"cluster_persistent_tasks_allocation_recheck_interval,omitempty"` + + // (String) Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + // Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + ClusterRoutingAllocationAllowRebalance *string `json:"clusterRoutingAllocationAllowRebalance,omitempty" tf:"cluster_routing_allocation_allow_rebalance,omitempty"` + + // (String) Use custom node attributes to take hardware configuration into account when allocating shards + // Use custom node attributes to take hardware configuration into account when allocating shards + ClusterRoutingAllocationAwarenessAttributes *string `json:"clusterRoutingAllocationAwarenessAttributes,omitempty" 
tf:"cluster_routing_allocation_awareness_attributes,omitempty"` + + // (Number) Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + // Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + ClusterRoutingAllocationBalanceIndex *float64 `json:"clusterRoutingAllocationBalanceIndex,omitempty" tf:"cluster_routing_allocation_balance_index,omitempty"` + + // (Number) Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + // Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + ClusterRoutingAllocationBalanceShard *float64 `json:"clusterRoutingAllocationBalanceShard,omitempty" tf:"cluster_routing_allocation_balance_shard,omitempty"` + + // (Number) Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + // Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + ClusterRoutingAllocationBalanceThreshold *float64 `json:"clusterRoutingAllocationBalanceThreshold,omitempty" tf:"cluster_routing_allocation_balance_threshold,omitempty"` + + // (Number) How many concurrent shard rebalances are allowed cluster wide + // How many concurrent shard rebalances are allowed cluster wide + ClusterRoutingAllocationClusterConcurrentRebalance *float64 `json:"clusterRoutingAllocationClusterConcurrentRebalance,omitempty" tf:"cluster_routing_allocation_cluster_concurrent_rebalance,omitempty"` + + // (Boolean) Whether the allocator will take into account shards that 
are currently being relocated to the target node when computing a node’s disk usage + // Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + ClusterRoutingAllocationDiskIncludeRelocations *bool `json:"clusterRoutingAllocationDiskIncludeRelocations,omitempty" tf:"cluster_routing_allocation_disk_include_relocations,omitempty"` + + // (Boolean) Whether the disk allocation decider is active + // Whether the disk allocation decider is active + ClusterRoutingAllocationDiskThresholdEnabled *bool `json:"clusterRoutingAllocationDiskThresholdEnabled,omitempty" tf:"cluster_routing_allocation_disk_threshold_enabled,omitempty"` + + // (String) Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + // Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + ClusterRoutingAllocationDiskWatermarkHigh *string `json:"clusterRoutingAllocationDiskWatermarkHigh,omitempty" tf:"cluster_routing_allocation_disk_watermark_high,omitempty"` + + // (String) Allocator will not allocate shards to nodes that have more than this percentage disk used + // Allocator will not allocate shards to nodes that have more than this percentage disk used + ClusterRoutingAllocationDiskWatermarkLow *string `json:"clusterRoutingAllocationDiskWatermarkLow,omitempty" tf:"cluster_routing_allocation_disk_watermark_low,omitempty"` + + // (String) Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + // Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + ClusterRoutingAllocationEnable *string `json:"clusterRoutingAllocationEnable,omitempty" tf:"cluster_routing_allocation_enable,omitempty"` + + // (Number) How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the 
node + // How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + ClusterRoutingAllocationNodeConcurrentIncomingRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentIncomingRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_incoming_recoveries,omitempty"` + + // (Number) How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + // How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentOutgoingRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_outgoing_recoveries,omitempty"` + + // (Number) A shortcut to set both incoming and outgoing recoveries + // A shortcut to set both incoming and outgoing recoveries + ClusterRoutingAllocationNodeConcurrentRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_recoveries,omitempty"` + + // (Number) Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + // Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + ClusterRoutingAllocationNodeInitialPrimariesRecoveries *float64 `json:"clusterRoutingAllocationNodeInitialPrimariesRecoveries,omitempty" tf:"cluster_routing_allocation_node_initial_primaries_recoveries,omitempty"` + + // (Boolean) Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + // Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + ClusterRoutingAllocationSameShardHost *bool 
`json:"clusterRoutingAllocationSameShardHost,omitempty" tf:"cluster_routing_allocation_same_shard_host,omitempty"` + + // (Number) Maximum number of primary and replica shards allocated to each node + // Maximum number of primary and replica shards allocated to each node + ClusterRoutingAllocationTotalShardsPerNode *float64 `json:"clusterRoutingAllocationTotalShardsPerNode,omitempty" tf:"cluster_routing_allocation_total_shards_per_node,omitempty"` + + // (String) Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + // Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + ClusterRoutingRebalanceEnable *string `json:"clusterRoutingRebalanceEnable,omitempty" tf:"cluster_routing_rebalance_enable,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + // The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + IndicesBreakerFielddataLimit *string `json:"indicesBreakerFielddataLimit,omitempty" tf:"indices_breaker_fielddata_limit,omitempty"` + + // (Number) A constant that all field data estimations are multiplied by + // A constant that all field data estimations are multiplied by + IndicesBreakerFielddataOverhead *float64 `json:"indicesBreakerFielddataOverhead,omitempty" tf:"indices_breaker_fielddata_overhead,omitempty"` + + // request data structures (e.g. calculating aggregations) are prevented from exceeding + // The percentabge of memory above which per-request data structures (e.g. 
calculating aggregations) are prevented from exceeding + IndicesBreakerRequestLimit *string `json:"indicesBreakerRequestLimit,omitempty" tf:"indices_breaker_request_limit,omitempty"` + + // (Number) A constant that all request estimations are multiplied by + // A constant that all request estimations are multiplied by + IndicesBreakerRequestOverhead *float64 `json:"indicesBreakerRequestOverhead,omitempty" tf:"indices_breaker_request_overhead,omitempty"` + + // (String) The percentage of total amount of memory that can be used across all breakers + // The percentage of total amount of memory that can be used across all breakers + IndicesBreakerTotalLimit *string `json:"indicesBreakerTotalLimit,omitempty" tf:"indices_breaker_total_limit,omitempty"` + + // (String) Maximum total inbound and outbound recovery traffic for each node, in mb + // Maximum total inbound and outbound recovery traffic for each node, in mb + IndicesRecoveryMaxBytesPerSec *string `json:"indicesRecoveryMaxBytesPerSec,omitempty" tf:"indices_recovery_max_bytes_per_sec,omitempty"` + + // (String) The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + // The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + NetworkBreakerInflightRequestsLimit *string `json:"networkBreakerInflightRequestsLimit,omitempty" tf:"network_breaker_inflight_requests_limit,omitempty"` + + // (Number) A constant that all in flight requests estimations are multiplied by + // A constant that all in flight requests estimations are multiplied by + NetworkBreakerInflightRequestsOverhead *float64 `json:"networkBreakerInflightRequestsOverhead,omitempty" tf:"network_breaker_inflight_requests_overhead,omitempty"` + + // (String) Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + // Limit for the number of 
unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + ScriptMaxCompilationsRate *string `json:"scriptMaxCompilationsRate,omitempty" tf:"script_max_compilations_rate,omitempty"` + + // wide default timeout for all search requests + // A time string setting a cluster-wide default timeout for all search requests + SearchDefaultSearchTimeout *string `json:"searchDefaultSearchTimeout,omitempty" tf:"search_default_search_timeout,omitempty"` +} + +type SettingsParameters struct { + + // (String) Whether to automatically create an index if it doesn’t already exist and apply any configured index template + // Whether to automatically create an index if it doesn’t already exist and apply any configured index template + // +kubebuilder:validation:Optional + ActionAutoCreateIndex *string `json:"actionAutoCreateIndex,omitempty" tf:"action_auto_create_index,omitempty"` + + // (Boolean) When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + // When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + // +kubebuilder:validation:Optional + ActionDestructiveRequiresName *bool `json:"actionDestructiveRequiresName,omitempty" tf:"action_destructive_requires_name,omitempty"` + + // (Boolean) Make the whole cluster read only and metadata is not allowed to be modified + // Make the whole cluster read only and metadata is not allowed to be modified + // +kubebuilder:validation:Optional + ClusterBlocksReadOnly *bool `json:"clusterBlocksReadOnly,omitempty" tf:"cluster_blocks_read_only,omitempty"` + + // (Boolean) Make the whole cluster read only, but allows to delete indices to free up resources + // Make the whole cluster read only, but allows to delete indices to free up resources + // +kubebuilder:validation:Optional + 
ClusterBlocksReadOnlyAllowDelete *bool `json:"clusterBlocksReadOnlyAllowDelete,omitempty" tf:"cluster_blocks_read_only_allow_delete,omitempty"` + + // (Boolean) If false, you cannot close open indices + // If false, you cannot close open indices + // +kubebuilder:validation:Optional + ClusterIndicesCloseEnable *bool `json:"clusterIndicesCloseEnable,omitempty" tf:"cluster_indices_close_enable,omitempty"` + + // (String) A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + // A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + // +kubebuilder:validation:Optional + ClusterInfoUpdateInterval *string `json:"clusterInfoUpdateInterval,omitempty" tf:"cluster_info_update_interval,omitempty"` + + // frozen data nodes; shards for closed indices do not count toward this limit + // The total number of primary and replica shards for the cluster, this number is multiplied by the number of non-frozen data nodes; shards for closed indices do not count toward this limit + // +kubebuilder:validation:Optional + ClusterMaxShardsPerNode *float64 `json:"clusterMaxShardsPerNode,omitempty" tf:"cluster_max_shards_per_node,omitempty"` + + // (Number) The total number of primary and replica frozen shards, for the cluster; Ssards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + // The total number of primary and replica frozen shards, for the cluster; Ssards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. 
+ // +kubebuilder:validation:Optional + ClusterMaxShardsPerNodeFrozen *float64 `json:"clusterMaxShardsPerNodeFrozen,omitempty" tf:"cluster_max_shards_per_node_frozen,omitempty"` + + // (String) Specifies which operations are rejected when there is no active master in a cluster (all, write) + // Specifies which operations are rejected when there is no active master in a cluster (all, write) + // +kubebuilder:validation:Optional + ClusterNoMasterBlock *string `json:"clusterNoMasterBlock,omitempty" tf:"cluster_no_master_block,omitempty"` + + // (String) Whether allocation for persistent tasks is active (all, none) + // Whether allocation for persistent tasks is active (all, none) + // +kubebuilder:validation:Optional + ClusterPersistentTasksAllocationEnable *string `json:"clusterPersistentTasksAllocationEnable,omitempty" tf:"cluster_persistent_tasks_allocation_enable,omitempty"` + + // (String) A time string controling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + // A time string controling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + // +kubebuilder:validation:Optional + ClusterPersistentTasksAllocationRecheckInterval *string `json:"clusterPersistentTasksAllocationRecheckInterval,omitempty" tf:"cluster_persistent_tasks_allocation_recheck_interval,omitempty"` + + // (String) Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + // Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + // +kubebuilder:validation:Optional + ClusterRoutingAllocationAllowRebalance *string `json:"clusterRoutingAllocationAllowRebalance,omitempty" tf:"cluster_routing_allocation_allow_rebalance,omitempty"` + + // (String) Use custom node attributes to take hardware configuration into account when allocating shards + // Use custom node attributes to take hardware configuration into 
account when allocating shards + // +kubebuilder:validation:Optional + ClusterRoutingAllocationAwarenessAttributes *string `json:"clusterRoutingAllocationAwarenessAttributes,omitempty" tf:"cluster_routing_allocation_awareness_attributes,omitempty"` + + // (Number) Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + // Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + // +kubebuilder:validation:Optional + ClusterRoutingAllocationBalanceIndex *float64 `json:"clusterRoutingAllocationBalanceIndex,omitempty" tf:"cluster_routing_allocation_balance_index,omitempty"` + + // (Number) Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + // Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + // +kubebuilder:validation:Optional + ClusterRoutingAllocationBalanceShard *float64 `json:"clusterRoutingAllocationBalanceShard,omitempty" tf:"cluster_routing_allocation_balance_shard,omitempty"` + + // (Number) Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + // Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + // +kubebuilder:validation:Optional + ClusterRoutingAllocationBalanceThreshold *float64 `json:"clusterRoutingAllocationBalanceThreshold,omitempty" tf:"cluster_routing_allocation_balance_threshold,omitempty"` + + // (Number) How many concurrent shard rebalances are allowed cluster wide + // How many concurrent shard rebalances are 
allowed cluster wide + // +kubebuilder:validation:Optional + ClusterRoutingAllocationClusterConcurrentRebalance *float64 `json:"clusterRoutingAllocationClusterConcurrentRebalance,omitempty" tf:"cluster_routing_allocation_cluster_concurrent_rebalance,omitempty"` + + // (Boolean) Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + // Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + // +kubebuilder:validation:Optional + ClusterRoutingAllocationDiskIncludeRelocations *bool `json:"clusterRoutingAllocationDiskIncludeRelocations,omitempty" tf:"cluster_routing_allocation_disk_include_relocations,omitempty"` + + // (Boolean) Whether the disk allocation decider is active + // Whether the disk allocation decider is active + // +kubebuilder:validation:Optional + ClusterRoutingAllocationDiskThresholdEnabled *bool `json:"clusterRoutingAllocationDiskThresholdEnabled,omitempty" tf:"cluster_routing_allocation_disk_threshold_enabled,omitempty"` + + // (String) Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + // Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + // +kubebuilder:validation:Optional + ClusterRoutingAllocationDiskWatermarkHigh *string `json:"clusterRoutingAllocationDiskWatermarkHigh,omitempty" tf:"cluster_routing_allocation_disk_watermark_high,omitempty"` + + // (String) Allocator will not allocate shards to nodes that have more than this percentage disk used + // Allocator will not allocate shards to nodes that have more than this percentage disk used + // +kubebuilder:validation:Optional + ClusterRoutingAllocationDiskWatermarkLow *string `json:"clusterRoutingAllocationDiskWatermarkLow,omitempty" tf:"cluster_routing_allocation_disk_watermark_low,omitempty"` + + // 
(String) Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + // Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + // +kubebuilder:validation:Optional + ClusterRoutingAllocationEnable *string `json:"clusterRoutingAllocationEnable,omitempty" tf:"cluster_routing_allocation_enable,omitempty"` + + // (Number) How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + // How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + // +kubebuilder:validation:Optional + ClusterRoutingAllocationNodeConcurrentIncomingRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentIncomingRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_incoming_recoveries,omitempty"` + + // (Number) How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + // How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + // +kubebuilder:validation:Optional + ClusterRoutingAllocationNodeConcurrentOutgoingRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentOutgoingRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_outgoing_recoveries,omitempty"` + + // (Number) A shortcut to set both incoming and outgoing recoveries + // A shortcut to set both incoming and outgoing recoveries + // +kubebuilder:validation:Optional + ClusterRoutingAllocationNodeConcurrentRecoveries *float64 `json:"clusterRoutingAllocationNodeConcurrentRecoveries,omitempty" tf:"cluster_routing_allocation_node_concurrent_recoveries,omitempty"` + + // (Number) Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + // Set a (usually) higher rate for primary recovery on node 
restart (usually from disk, so fast) + // +kubebuilder:validation:Optional + ClusterRoutingAllocationNodeInitialPrimariesRecoveries *float64 `json:"clusterRoutingAllocationNodeInitialPrimariesRecoveries,omitempty" tf:"cluster_routing_allocation_node_initial_primaries_recoveries,omitempty"` + + // (Boolean) Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + // Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + // +kubebuilder:validation:Optional + ClusterRoutingAllocationSameShardHost *bool `json:"clusterRoutingAllocationSameShardHost,omitempty" tf:"cluster_routing_allocation_same_shard_host,omitempty"` + + // (Number) Maximum number of primary and replica shards allocated to each node + // Maximum number of primary and replica shards allocated to each node + // +kubebuilder:validation:Optional + ClusterRoutingAllocationTotalShardsPerNode *float64 `json:"clusterRoutingAllocationTotalShardsPerNode,omitempty" tf:"cluster_routing_allocation_total_shards_per_node,omitempty"` + + // (String) Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + // Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + // +kubebuilder:validation:Optional + ClusterRoutingRebalanceEnable *string `json:"clusterRoutingRebalanceEnable,omitempty" tf:"cluster_routing_rebalance_enable,omitempty"` + + // (String) The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + // The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + // +kubebuilder:validation:Optional + IndicesBreakerFielddataLimit *string `json:"indicesBreakerFielddataLimit,omitempty" 
tf:"indices_breaker_fielddata_limit,omitempty"` + + // (Number) A constant that all field data estimations are multiplied by + // A constant that all field data estimations are multiplied by + // +kubebuilder:validation:Optional + IndicesBreakerFielddataOverhead *float64 `json:"indicesBreakerFielddataOverhead,omitempty" tf:"indices_breaker_fielddata_overhead,omitempty"` + + // request data structures (e.g. calculating aggregations) are prevented from exceeding + // The percentabge of memory above which per-request data structures (e.g. calculating aggregations) are prevented from exceeding + // +kubebuilder:validation:Optional + IndicesBreakerRequestLimit *string `json:"indicesBreakerRequestLimit,omitempty" tf:"indices_breaker_request_limit,omitempty"` + + // (Number) A constant that all request estimations are multiplied by + // A constant that all request estimations are multiplied by + // +kubebuilder:validation:Optional + IndicesBreakerRequestOverhead *float64 `json:"indicesBreakerRequestOverhead,omitempty" tf:"indices_breaker_request_overhead,omitempty"` + + // (String) The percentage of total amount of memory that can be used across all breakers + // The percentage of total amount of memory that can be used across all breakers + // +kubebuilder:validation:Optional + IndicesBreakerTotalLimit *string `json:"indicesBreakerTotalLimit,omitempty" tf:"indices_breaker_total_limit,omitempty"` + + // (String) Maximum total inbound and outbound recovery traffic for each node, in mb + // Maximum total inbound and outbound recovery traffic for each node, in mb + // +kubebuilder:validation:Optional + IndicesRecoveryMaxBytesPerSec *string `json:"indicesRecoveryMaxBytesPerSec,omitempty" tf:"indices_recovery_max_bytes_per_sec,omitempty"` + + // (String) The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + // The percentage limit of memory usage on a node of all currently active incoming requests on transport or 
HTTP level + // +kubebuilder:validation:Optional + NetworkBreakerInflightRequestsLimit *string `json:"networkBreakerInflightRequestsLimit,omitempty" tf:"network_breaker_inflight_requests_limit,omitempty"` + + // (Number) A constant that all in flight requests estimations are multiplied by + // A constant that all in flight requests estimations are multiplied by + // +kubebuilder:validation:Optional + NetworkBreakerInflightRequestsOverhead *float64 `json:"networkBreakerInflightRequestsOverhead,omitempty" tf:"network_breaker_inflight_requests_overhead,omitempty"` + + // (String) Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + // Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + // +kubebuilder:validation:Optional + ScriptMaxCompilationsRate *string `json:"scriptMaxCompilationsRate,omitempty" tf:"script_max_compilations_rate,omitempty"` + + // wide default timeout for all search requests + // A time string setting a cluster-wide default timeout for all search requests + // +kubebuilder:validation:Optional + SearchDefaultSearchTimeout *string `json:"searchDefaultSearchTimeout,omitempty" tf:"search_default_search_timeout,omitempty"` +} + +// SettingsSpec defines the desired state of Settings +type SettingsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SettingsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider SettingsInitParameters `json:"initProvider,omitempty"` +} + +// SettingsStatus defines the observed state of Settings. +type SettingsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SettingsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Settings is the Schema for the Settings API. Manages a cluster's (persistent) settings. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Settings struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SettingsSpec `json:"spec"` + Status SettingsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SettingsList contains a list of Settings +type SettingsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Settings `json:"items"` +} + +// Repository type metadata. +var ( + Settings_Kind = "Settings" + Settings_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Settings_Kind}.String() + Settings_KindAPIVersion = Settings_Kind + "." 
+ CRDGroupVersion.String() + Settings_GroupVersionKind = CRDGroupVersion.WithKind(Settings_Kind) +) + +func init() { + SchemeBuilder.Register(&Settings{}, &SettingsList{}) +} diff --git a/apis/component/v1alpha1/zz_generated.conversion_hubs.go b/apis/component/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..0ce11be --- /dev/null +++ b/apis/component/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Template) Hub() {} diff --git a/apis/component/v1alpha1/zz_generated.deepcopy.go b/apis/component/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..d2bdb72 --- /dev/null +++ b/apis/component/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,183 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Template) DeepCopyInto(out *Template) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. +func (in *Template) DeepCopy() *Template { + if in == nil { + return nil + } + out := new(Template) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Template) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TemplateInitParameters) DeepCopyInto(out *TemplateInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInitParameters. +func (in *TemplateInitParameters) DeepCopy() *TemplateInitParameters { + if in == nil { + return nil + } + out := new(TemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateList) DeepCopyInto(out *TemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Template, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateList. +func (in *TemplateList) DeepCopy() *TemplateList { + if in == nil { + return nil + } + out := new(TemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateObservation) DeepCopyInto(out *TemplateObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateObservation. +func (in *TemplateObservation) DeepCopy() *TemplateObservation { + if in == nil { + return nil + } + out := new(TemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateParameters) DeepCopyInto(out *TemplateParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameters. +func (in *TemplateParameters) DeepCopy() *TemplateParameters { + if in == nil { + return nil + } + out := new(TemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateSpec) DeepCopyInto(out *TemplateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSpec. 
+func (in *TemplateSpec) DeepCopy() *TemplateSpec { + if in == nil { + return nil + } + out := new(TemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateStatus) DeepCopyInto(out *TemplateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateStatus. +func (in *TemplateStatus) DeepCopy() *TemplateStatus { + if in == nil { + return nil + } + out := new(TemplateStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/component/v1alpha1/zz_generated.managed.go b/apis/component/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..5f11b5a --- /dev/null +++ b/apis/component/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Template. +func (mg *Template) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Template. +func (mg *Template) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Template. +func (mg *Template) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Template. +func (mg *Template) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Template. +func (mg *Template) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Template. 
+func (mg *Template) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Template. +func (mg *Template) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Template. +func (mg *Template) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Template. +func (mg *Template) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Template. +func (mg *Template) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Template. +func (mg *Template) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Template. +func (mg *Template) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/component/v1alpha1/zz_generated.managedlist.go b/apis/component/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..b2b0db1 --- /dev/null +++ b/apis/component/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this TemplateList. 
+func (l *TemplateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/component/v1alpha1/zz_groupversion_info.go b/apis/component/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..ff047b0 --- /dev/null +++ b/apis/component/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=component.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "component.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/component/v1alpha1/zz_template_terraformed.go b/apis/component/v1alpha1/zz_template_terraformed.go new file mode 100755 index 0000000..c54a2da --- /dev/null +++ b/apis/component/v1alpha1/zz_template_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Template +func (mg *Template) GetTerraformResourceType() string { + return "opensearch_component_template" +} + +// GetConnectionDetailsMapping for this Template +func (tr *Template) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Template +func (tr *Template) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Template +func (tr *Template) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Template +func (tr *Template) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Template +func (tr *Template) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Template +func (tr *Template) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Template +func (tr *Template) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Template
+func (tr *Template) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Template using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Template) LateInitialize(attrs []byte) (bool, error) {
+	params := &TemplateParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Template) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/component/v1alpha1/zz_template_types.go b/apis/component/v1alpha1/zz_template_types.go new file mode 100755 index 0000000..52a6236 --- /dev/null +++ b/apis/component/v1alpha1/zz_template_types.go @@ -0,0 +1,115 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TemplateInitParameters struct { + + // (String) The JSON body of the template. + // The JSON body of the template. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) Name of the component template to create. + // Name of the component template to create. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TemplateObservation struct { + + // (String) The JSON body of the template. + // The JSON body of the template. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) Name of the component template to create. + // Name of the component template to create. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TemplateParameters struct { + + // (String) The JSON body of the template. + // The JSON body of the template. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) Name of the component template to create. + // Name of the component template to create. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +// TemplateSpec defines the desired state of Template +type TemplateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TemplateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TemplateInitParameters `json:"initProvider,omitempty"` +} + +// TemplateStatus defines the observed state of Template. +type TemplateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TemplateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Template is the Schema for the Templates API. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template’s composed_of list. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Template struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec TemplateSpec `json:"spec"` + Status TemplateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TemplateList contains a list of Templates +type TemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Template `json:"items"` +} + +// Repository type metadata. +var ( + Template_Kind = "Template" + Template_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Template_Kind}.String() + Template_KindAPIVersion = Template_Kind + "." 
+ CRDGroupVersion.String() + Template_GroupVersionKind = CRDGroupVersion.WithKind(Template_Kind) +) + +func init() { + SchemeBuilder.Register(&Template{}, &TemplateList{}) +} diff --git a/apis/composable/v1alpha1/zz_generated.conversion_hubs.go b/apis/composable/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..5905664 --- /dev/null +++ b/apis/composable/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *IndexTemplate) Hub() {} diff --git a/apis/composable/v1alpha1/zz_generated.deepcopy.go b/apis/composable/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..4a80f04 --- /dev/null +++ b/apis/composable/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,183 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexTemplate) DeepCopyInto(out *IndexTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplate. +func (in *IndexTemplate) DeepCopy() *IndexTemplate { + if in == nil { + return nil + } + out := new(IndexTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IndexTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexTemplateInitParameters) DeepCopyInto(out *IndexTemplateInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateInitParameters. +func (in *IndexTemplateInitParameters) DeepCopy() *IndexTemplateInitParameters { + if in == nil { + return nil + } + out := new(IndexTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexTemplateList) DeepCopyInto(out *IndexTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IndexTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateList. +func (in *IndexTemplateList) DeepCopy() *IndexTemplateList { + if in == nil { + return nil + } + out := new(IndexTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IndexTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexTemplateObservation) DeepCopyInto(out *IndexTemplateObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateObservation. +func (in *IndexTemplateObservation) DeepCopy() *IndexTemplateObservation { + if in == nil { + return nil + } + out := new(IndexTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexTemplateParameters) DeepCopyInto(out *IndexTemplateParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateParameters. +func (in *IndexTemplateParameters) DeepCopy() *IndexTemplateParameters { + if in == nil { + return nil + } + out := new(IndexTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexTemplateSpec) DeepCopyInto(out *IndexTemplateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateSpec. 
+func (in *IndexTemplateSpec) DeepCopy() *IndexTemplateSpec { + if in == nil { + return nil + } + out := new(IndexTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexTemplateStatus) DeepCopyInto(out *IndexTemplateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateStatus. +func (in *IndexTemplateStatus) DeepCopy() *IndexTemplateStatus { + if in == nil { + return nil + } + out := new(IndexTemplateStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/composable/v1alpha1/zz_generated.managed.go b/apis/composable/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..3ac3be7 --- /dev/null +++ b/apis/composable/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this IndexTemplate. +func (mg *IndexTemplate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IndexTemplate. +func (mg *IndexTemplate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IndexTemplate. +func (mg *IndexTemplate) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IndexTemplate. +func (mg *IndexTemplate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IndexTemplate. 
+func (mg *IndexTemplate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IndexTemplate. +func (mg *IndexTemplate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IndexTemplate. +func (mg *IndexTemplate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IndexTemplate. +func (mg *IndexTemplate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IndexTemplate. +func (mg *IndexTemplate) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IndexTemplate. +func (mg *IndexTemplate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IndexTemplate. +func (mg *IndexTemplate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IndexTemplate. +func (mg *IndexTemplate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/composable/v1alpha1/zz_generated.managedlist.go b/apis/composable/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..4436aaa --- /dev/null +++ b/apis/composable/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this IndexTemplateList. 
+func (l *IndexTemplateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/composable/v1alpha1/zz_groupversion_info.go b/apis/composable/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..2928000 --- /dev/null +++ b/apis/composable/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=composable.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "composable.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/composable/v1alpha1/zz_indextemplate_terraformed.go b/apis/composable/v1alpha1/zz_indextemplate_terraformed.go new file mode 100755 index 0000000..004724f --- /dev/null +++ b/apis/composable/v1alpha1/zz_indextemplate_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IndexTemplate +func (mg *IndexTemplate) GetTerraformResourceType() string { + return "opensearch_composable_index_template" +} + +// GetConnectionDetailsMapping for this IndexTemplate +func (tr *IndexTemplate) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this IndexTemplate +func (tr *IndexTemplate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IndexTemplate +func (tr *IndexTemplate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IndexTemplate +func (tr *IndexTemplate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IndexTemplate +func (tr *IndexTemplate) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IndexTemplate +func (tr *IndexTemplate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IndexTemplate +func (tr *IndexTemplate) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil 
{
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this IndexTemplate
+func (tr *IndexTemplate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this IndexTemplate using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *IndexTemplate) LateInitialize(attrs []byte) (bool, error) {
+ params := &IndexTemplateParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IndexTemplate) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/composable/v1alpha1/zz_indextemplate_types.go b/apis/composable/v1alpha1/zz_indextemplate_types.go new file mode 100755 index 0000000..0b9b1ea --- /dev/null +++ b/apis/composable/v1alpha1/zz_indextemplate_types.go @@ -0,0 +1,115 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IndexTemplateInitParameters struct { + + // (String) The JSON body of the index template. + // The JSON body of the index template. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the index template. + // The name of the index template. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IndexTemplateObservation struct { + + // (String) The JSON body of the index template. + // The JSON body of the index template. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The name of the index template. + // The name of the index template. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IndexTemplateParameters struct { + + // (String) The JSON body of the index template. + // The JSON body of the index template. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the index template. + // The name of the index template. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +// IndexTemplateSpec defines the desired state of IndexTemplate +type IndexTemplateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IndexTemplateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IndexTemplateInitParameters `json:"initProvider,omitempty"` +} + +// IndexTemplateStatus defines the observed state of IndexTemplate. +type IndexTemplateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IndexTemplateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// IndexTemplate is the Schema for the IndexTemplates API. Provides an Composable index template resource. This resource uses the /_index_template endpoint of the API that is available since version 2.0.0. Use opensearch_index_template if you are using older versions or if you want to keep using legacy Index Templates. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type IndexTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec IndexTemplateSpec `json:"spec"` + Status IndexTemplateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IndexTemplateList contains a list of IndexTemplates +type IndexTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IndexTemplate `json:"items"` +} + +// Repository type metadata. +var ( + IndexTemplate_Kind = "IndexTemplate" + IndexTemplate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IndexTemplate_Kind}.String() + IndexTemplate_KindAPIVersion = IndexTemplate_Kind + "." 
+ CRDGroupVersion.String() + IndexTemplate_GroupVersionKind = CRDGroupVersion.WithKind(IndexTemplate_Kind) +) + +func init() { + SchemeBuilder.Register(&IndexTemplate{}, &IndexTemplateList{}) +} diff --git a/apis/dashboard/v1alpha1/zz_generated.conversion_hubs.go b/apis/dashboard/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..91e877a --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Object) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Tenant) Hub() {} diff --git a/apis/dashboard/v1alpha1/zz_generated.deepcopy.go b/apis/dashboard/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..5f11271 --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,362 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Object) DeepCopyInto(out *Object) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *Object) DeepCopy() *Object { + if in == nil { + return nil + } + out := new(Object) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Object) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectInitParameters) DeepCopyInto(out *ObjectInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectInitParameters. +func (in *ObjectInitParameters) DeepCopy() *ObjectInitParameters { + if in == nil { + return nil + } + out := new(ObjectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectList) DeepCopyInto(out *ObjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Object, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectList. +func (in *ObjectList) DeepCopy() *ObjectList { + if in == nil { + return nil + } + out := new(ObjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectObservation) DeepCopyInto(out *ObjectObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectObservation. +func (in *ObjectObservation) DeepCopy() *ObjectObservation { + if in == nil { + return nil + } + out := new(ObjectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectParameters) DeepCopyInto(out *ObjectParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectParameters. +func (in *ObjectParameters) DeepCopy() *ObjectParameters { + if in == nil { + return nil + } + out := new(ObjectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectSpec) DeepCopyInto(out *ObjectSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSpec. +func (in *ObjectSpec) DeepCopy() *ObjectSpec { + if in == nil { + return nil + } + out := new(ObjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ObjectStatus) DeepCopyInto(out *ObjectStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStatus. +func (in *ObjectStatus) DeepCopy() *ObjectStatus { + if in == nil { + return nil + } + out := new(ObjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tenant) DeepCopyInto(out *Tenant) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tenant. +func (in *Tenant) DeepCopy() *Tenant { + if in == nil { + return nil + } + out := new(Tenant) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Tenant) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantInitParameters) DeepCopyInto(out *TenantInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.TenantName != nil { + in, out := &in.TenantName, &out.TenantName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantInitParameters. 
+func (in *TenantInitParameters) DeepCopy() *TenantInitParameters { + if in == nil { + return nil + } + out := new(TenantInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantList) DeepCopyInto(out *TenantList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Tenant, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantList. +func (in *TenantList) DeepCopy() *TenantList { + if in == nil { + return nil + } + out := new(TenantList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TenantList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantObservation) DeepCopyInto(out *TenantObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } + if in.TenantName != nil { + in, out := &in.TenantName, &out.TenantName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantObservation. 
+func (in *TenantObservation) DeepCopy() *TenantObservation { + if in == nil { + return nil + } + out := new(TenantObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantParameters) DeepCopyInto(out *TenantParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.TenantName != nil { + in, out := &in.TenantName, &out.TenantName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantParameters. +func (in *TenantParameters) DeepCopy() *TenantParameters { + if in == nil { + return nil + } + out := new(TenantParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantSpec) DeepCopyInto(out *TenantSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantSpec. +func (in *TenantSpec) DeepCopy() *TenantSpec { + if in == nil { + return nil + } + out := new(TenantSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantStatus) DeepCopyInto(out *TenantStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantStatus. 
+func (in *TenantStatus) DeepCopy() *TenantStatus { + if in == nil { + return nil + } + out := new(TenantStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dashboard/v1alpha1/zz_generated.managed.go b/apis/dashboard/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..21a6c72 --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_generated.managed.go @@ -0,0 +1,125 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Object. +func (mg *Object) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Object. +func (mg *Object) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Object. +func (mg *Object) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Object. +func (mg *Object) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Object. +func (mg *Object) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Object. +func (mg *Object) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Object. +func (mg *Object) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Object. +func (mg *Object) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Object. +func (mg *Object) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Object. 
+func (mg *Object) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Object. +func (mg *Object) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Object. +func (mg *Object) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Tenant. +func (mg *Tenant) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Tenant. +func (mg *Tenant) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Tenant. +func (mg *Tenant) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Tenant. +func (mg *Tenant) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Tenant. +func (mg *Tenant) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Tenant. +func (mg *Tenant) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Tenant. +func (mg *Tenant) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Tenant. +func (mg *Tenant) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Tenant. +func (mg *Tenant) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Tenant. 
+func (mg *Tenant) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Tenant. +func (mg *Tenant) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Tenant. +func (mg *Tenant) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dashboard/v1alpha1/zz_generated.managedlist.go b/apis/dashboard/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..161245b --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,23 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ObjectList. +func (l *ObjectList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TenantList. +func (l *TenantList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dashboard/v1alpha1/zz_groupversion_info.go b/apis/dashboard/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..2661ad6 --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=dashboard.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "dashboard.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/dashboard/v1alpha1/zz_object_terraformed.go b/apis/dashboard/v1alpha1/zz_object_terraformed.go new file mode 100755 index 0000000..6bdec33 --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_object_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Object +func (mg *Object) GetTerraformResourceType() string { + return "opensearch_dashboard_object" +} + +// GetConnectionDetailsMapping for this Object +func (tr *Object) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Object +func (tr *Object) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Object +func (tr *Object) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Object +func (tr *Object) GetID() string 
{
+ if tr.Status.AtProvider.ID == nil {
+ return ""
+ }
+ return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Object
+func (tr *Object) GetParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Object
+func (tr *Object) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Object
+func (tr *Object) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Object
+func (tr *Object) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Object using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Object) LateInitialize(attrs []byte) (bool, error) { + params := &ObjectParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Object) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dashboard/v1alpha1/zz_object_types.go b/apis/dashboard/v1alpha1/zz_object_types.go new file mode 100755 index 0000000..0489cf7 --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_object_types.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ObjectInitParameters struct { + + // (String) The JSON body of the dashboard object. + // The JSON body of the dashboard object. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the index where dashboard data is stored. + // The name of the index where dashboard data is stored. 
+ Index *string `json:"index,omitempty" tf:"index,omitempty"` +} + +type ObjectObservation struct { + + // (String) The JSON body of the dashboard object. + // The JSON body of the dashboard object. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The name of the index where dashboard data is stored. + // The name of the index where dashboard data is stored. + Index *string `json:"index,omitempty" tf:"index,omitempty"` +} + +type ObjectParameters struct { + + // (String) The JSON body of the dashboard object. + // The JSON body of the dashboard object. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the index where dashboard data is stored. + // The name of the index where dashboard data is stored. + // +kubebuilder:validation:Optional + Index *string `json:"index,omitempty" tf:"index,omitempty"` +} + +// ObjectSpec defines the desired state of Object +type ObjectSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ObjectParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ObjectInitParameters `json:"initProvider,omitempty"` +} + +// ObjectStatus defines the observed state of Object. 
+type ObjectStatus struct {
+ v1.ResourceStatus `json:",inline"`
+ AtProvider ObjectObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Object is the Schema for the Objects API. Provides an OpenSearch Dashboards object resource. This resource interacts directly with the underlying OpenSearch index backing Dashboards, so the format must match what the version of Dashboards in use is expecting. For older versions of Dashboards, directly pulling the JSON from a Dashboards index of the same version of OpenSearch targeted by the provider is a workaround.
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch}
+type Object struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter"
+ Spec ObjectSpec `json:"spec"`
+ Status ObjectStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ObjectList contains a list of Objects
+type ObjectList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Object `json:"items"`
+}
+
+// Repository type metadata.
+var ( + Object_Kind = "Object" + Object_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Object_Kind}.String() + Object_KindAPIVersion = Object_Kind + "." + CRDGroupVersion.String() + Object_GroupVersionKind = CRDGroupVersion.WithKind(Object_Kind) +) + +func init() { + SchemeBuilder.Register(&Object{}, &ObjectList{}) +} diff --git a/apis/dashboard/v1alpha1/zz_tenant_terraformed.go b/apis/dashboard/v1alpha1/zz_tenant_terraformed.go new file mode 100755 index 0000000..8ac861d --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_tenant_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Tenant +func (mg *Tenant) GetTerraformResourceType() string { + return "opensearch_dashboard_tenant" +} + +// GetConnectionDetailsMapping for this Tenant +func (tr *Tenant) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Tenant +func (tr *Tenant) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Tenant +func (tr *Tenant) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Tenant +func (tr *Tenant) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Tenant +func (tr *Tenant) GetParameters() (map[string]any, 
 error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Tenant +func (tr *Tenant) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Tenant +func (tr *Tenant) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Tenant +func (tr *Tenant) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Tenant using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Tenant) LateInitialize(attrs []byte) (bool, error) { + params := &TenantParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Tenant) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dashboard/v1alpha1/zz_tenant_types.go b/apis/dashboard/v1alpha1/zz_tenant_types.go new file mode 100755 index 0000000..78899ce --- /dev/null +++ b/apis/dashboard/v1alpha1/zz_tenant_types.go @@ -0,0 +1,117 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TenantInitParameters struct { + + // (String) Description of the tenant. + // Description of the tenant. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (String) The name of the tenant. + // The name of the tenant. + TenantName *string `json:"tenantName,omitempty" tf:"tenant_name,omitempty"` +} + +type TenantObservation struct { + + // (String) Description of the tenant. + // Description of the tenant. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) + Index *string `json:"index,omitempty" tf:"index,omitempty"` + + // (String) The name of the tenant. + // The name of the tenant. 
+ TenantName *string `json:"tenantName,omitempty" tf:"tenant_name,omitempty"` +} + +type TenantParameters struct { + + // (String) Description of the tenant. + // Description of the tenant. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (String) The name of the tenant. + // The name of the tenant. + // +kubebuilder:validation:Optional + TenantName *string `json:"tenantName,omitempty" tf:"tenant_name,omitempty"` +} + +// TenantSpec defines the desired state of Tenant +type TenantSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TenantParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TenantInitParameters `json:"initProvider,omitempty"` +} + +// TenantStatus defines the observed state of Tenant. +type TenantStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TenantObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Tenant is the Schema for the Tenants API. Provides an OpenSearch dashboard tenant resource. Please refer to the OpenSearch documentation for details. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Tenant struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tenantName) || (has(self.initProvider) && has(self.initProvider.tenantName))",message="spec.forProvider.tenantName is a required parameter" + Spec TenantSpec `json:"spec"` + Status TenantStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TenantList contains a list of Tenants +type TenantList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Tenant `json:"items"` +} + +// Repository type metadata. +var ( + Tenant_Kind = "Tenant" + Tenant_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Tenant_Kind}.String() + Tenant_KindAPIVersion = Tenant_Kind + "." + CRDGroupVersion.String() + Tenant_GroupVersionKind = CRDGroupVersion.WithKind(Tenant_Kind) +) + +func init() { + SchemeBuilder.Register(&Tenant{}, &TenantList{}) +} diff --git a/apis/data/v1alpha1/zz_generated.conversion_hubs.go b/apis/data/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..271eb40 --- /dev/null +++ b/apis/data/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Stream) Hub() {} diff --git a/apis/data/v1alpha1/zz_generated.deepcopy.go b/apis/data/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b0221e8 --- /dev/null +++ b/apis/data/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,168 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Stream) DeepCopyInto(out *Stream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stream. +func (in *Stream) DeepCopy() *Stream { + if in == nil { + return nil + } + out := new(Stream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Stream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInitParameters) DeepCopyInto(out *StreamInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInitParameters. +func (in *StreamInitParameters) DeepCopy() *StreamInitParameters { + if in == nil { + return nil + } + out := new(StreamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StreamList) DeepCopyInto(out *StreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Stream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamList. +func (in *StreamList) DeepCopy() *StreamList { + if in == nil { + return nil + } + out := new(StreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamObservation) DeepCopyInto(out *StreamObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamObservation. +func (in *StreamObservation) DeepCopy() *StreamObservation { + if in == nil { + return nil + } + out := new(StreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamParameters) DeepCopyInto(out *StreamParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamParameters. 
+func (in *StreamParameters) DeepCopy() *StreamParameters { + if in == nil { + return nil + } + out := new(StreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamSpec) DeepCopyInto(out *StreamSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamSpec. +func (in *StreamSpec) DeepCopy() *StreamSpec { + if in == nil { + return nil + } + out := new(StreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamStatus) DeepCopyInto(out *StreamStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamStatus. +func (in *StreamStatus) DeepCopy() *StreamStatus { + if in == nil { + return nil + } + out := new(StreamStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/data/v1alpha1/zz_generated.managed.go b/apis/data/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..a556d02 --- /dev/null +++ b/apis/data/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Stream. +func (mg *Stream) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Stream. +func (mg *Stream) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Stream. 
+func (mg *Stream) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Stream. +func (mg *Stream) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Stream. +func (mg *Stream) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Stream. +func (mg *Stream) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Stream. +func (mg *Stream) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Stream. +func (mg *Stream) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Stream. +func (mg *Stream) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Stream. +func (mg *Stream) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Stream. +func (mg *Stream) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Stream. +func (mg *Stream) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/data/v1alpha1/zz_generated.managedlist.go b/apis/data/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..2b8d5f5 --- /dev/null +++ b/apis/data/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this StreamList. 
+func (l *StreamList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/data/v1alpha1/zz_groupversion_info.go b/apis/data/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..2edd717 --- /dev/null +++ b/apis/data/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=data.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "data.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/data/v1alpha1/zz_stream_terraformed.go b/apis/data/v1alpha1/zz_stream_terraformed.go new file mode 100755 index 0000000..c80a498 --- /dev/null +++ b/apis/data/v1alpha1/zz_stream_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Stream +func (mg *Stream) GetTerraformResourceType() string { + return "opensearch_data_stream" +} + +// GetConnectionDetailsMapping for this Stream +func (tr *Stream) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Stream +func (tr *Stream) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Stream +func (tr *Stream) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Stream +func (tr *Stream) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Stream +func (tr *Stream) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Stream +func (tr *Stream) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Stream +func (tr *Stream) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetMergedParameters of this Stream +func (tr *Stream) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Stream using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Stream) LateInitialize(attrs []byte) (bool, error) { + params := &StreamParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Stream) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/data/v1alpha1/zz_stream_types.go b/apis/data/v1alpha1/zz_stream_types.go new file mode 100755 index 0000000..ea5f37a --- /dev/null +++ b/apis/data/v1alpha1/zz_stream_types.go @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type StreamInitParameters struct { + + // (String) Name of the data stream to create, must have a matching + // Name of the data stream to create, must have a matching + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StreamObservation struct { + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) Name of the data stream to create, must have a matching + // Name of the data stream to create, must have a matching + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StreamParameters struct { + + // (String) Name of the data stream to create, must have a matching + // Name of the data stream to create, must have a matching + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +// StreamSpec defines the desired state of Stream +type StreamSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StreamParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StreamInitParameters `json:"initProvider,omitempty"` +} + +// StreamStatus defines the observed state of Stream. +type StreamStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StreamObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Stream is the Schema for the Streams API. A data stream lets you store append-only time series data across multiple (hidden, auto-generated) indices while giving you a single named resource for requests +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Stream struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec StreamSpec `json:"spec"` + Status StreamStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StreamList contains a list of Streams +type StreamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Stream `json:"items"` +} + +// Repository type metadata. +var ( + Stream_Kind = "Stream" + Stream_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Stream_Kind}.String() + Stream_KindAPIVersion = Stream_Kind + "." + CRDGroupVersion.String() + Stream_GroupVersionKind = CRDGroupVersion.WithKind(Stream_Kind) +) + +func init() { + SchemeBuilder.Register(&Stream{}, &StreamList{}) +} diff --git a/apis/index/v1alpha1/zz_generated.conversion_hubs.go b/apis/index/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..0ce11be --- /dev/null +++ b/apis/index/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Template) Hub() {} diff --git a/apis/index/v1alpha1/zz_generated.deepcopy.go b/apis/index/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..d2bdb72 --- /dev/null +++ b/apis/index/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,183 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Template) DeepCopyInto(out *Template) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. 
+func (in *Template) DeepCopy() *Template { + if in == nil { + return nil + } + out := new(Template) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Template) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInitParameters) DeepCopyInto(out *TemplateInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInitParameters. +func (in *TemplateInitParameters) DeepCopy() *TemplateInitParameters { + if in == nil { + return nil + } + out := new(TemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateList) DeepCopyInto(out *TemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Template, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateList. +func (in *TemplateList) DeepCopy() *TemplateList { + if in == nil { + return nil + } + out := new(TemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateObservation) DeepCopyInto(out *TemplateObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateObservation. +func (in *TemplateObservation) DeepCopy() *TemplateObservation { + if in == nil { + return nil + } + out := new(TemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateParameters) DeepCopyInto(out *TemplateParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameters. +func (in *TemplateParameters) DeepCopy() *TemplateParameters { + if in == nil { + return nil + } + out := new(TemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateSpec) DeepCopyInto(out *TemplateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSpec. +func (in *TemplateSpec) DeepCopy() *TemplateSpec { + if in == nil { + return nil + } + out := new(TemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateStatus) DeepCopyInto(out *TemplateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateStatus. +func (in *TemplateStatus) DeepCopy() *TemplateStatus { + if in == nil { + return nil + } + out := new(TemplateStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/index/v1alpha1/zz_generated.managed.go b/apis/index/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..5f11b5a --- /dev/null +++ b/apis/index/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Template. +func (mg *Template) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Template. +func (mg *Template) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Template. +func (mg *Template) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Template. 
+func (mg *Template) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Template. +func (mg *Template) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Template. +func (mg *Template) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Template. +func (mg *Template) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Template. +func (mg *Template) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Template. +func (mg *Template) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Template. +func (mg *Template) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Template. +func (mg *Template) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Template. +func (mg *Template) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/index/v1alpha1/zz_generated.managedlist.go b/apis/index/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..b2b0db1 --- /dev/null +++ b/apis/index/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this TemplateList. 
+func (l *TemplateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/index/v1alpha1/zz_groupversion_info.go b/apis/index/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..21a4a94 --- /dev/null +++ b/apis/index/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=index.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "index.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/index/v1alpha1/zz_template_terraformed.go b/apis/index/v1alpha1/zz_template_terraformed.go new file mode 100755 index 0000000..a9f2dde --- /dev/null +++ b/apis/index/v1alpha1/zz_template_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Template +func (mg *Template) GetTerraformResourceType() string { + return "opensearch_index_template" +} + +// GetConnectionDetailsMapping for this Template +func (tr *Template) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Template +func (tr *Template) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Template +func (tr *Template) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Template +func (tr *Template) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Template +func (tr *Template) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Template +func (tr *Template) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Template +func (tr *Template) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Template
+func (tr *Template) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Template using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Template) LateInitialize(attrs []byte) (bool, error) {
+	params := &TemplateParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Template) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/index/v1alpha1/zz_template_types.go b/apis/index/v1alpha1/zz_template_types.go new file mode 100755 index 0000000..e11001b --- /dev/null +++ b/apis/index/v1alpha1/zz_template_types.go @@ -0,0 +1,115 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TemplateInitParameters struct { + + // (String) The JSON body of the index template. + // The JSON body of the index template. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the index template. + // The name of the index template. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TemplateObservation struct { + + // (String) The JSON body of the index template. + // The JSON body of the index template. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The name of the index template. + // The name of the index template. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TemplateParameters struct { + + // (String) The JSON body of the index template. + // The JSON body of the index template. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the index template. + // The name of the index template. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +// TemplateSpec defines the desired state of Template +type TemplateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TemplateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TemplateInitParameters `json:"initProvider,omitempty"` +} + +// TemplateStatus defines the observed state of Template. +type TemplateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TemplateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Template is the Schema for the Templates API. Provides an OpenSearch index template resource. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Template struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec TemplateSpec `json:"spec"` + Status TemplateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TemplateList contains a list of Templates +type TemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Template `json:"items"` +} + +// Repository type metadata. +var ( + Template_Kind = "Template" + Template_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Template_Kind}.String() + Template_KindAPIVersion = Template_Kind + "." 
+ CRDGroupVersion.String() + Template_GroupVersionKind = CRDGroupVersion.WithKind(Template_Kind) +) + +func init() { + SchemeBuilder.Register(&Template{}, &TemplateList{}) +} diff --git a/apis/ingest/v1alpha1/zz_generated.conversion_hubs.go b/apis/ingest/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..01a55be --- /dev/null +++ b/apis/ingest/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Pipeline) Hub() {} diff --git a/apis/ingest/v1alpha1/zz_generated.deepcopy.go b/apis/ingest/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..4c8fe54 --- /dev/null +++ b/apis/ingest/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,183 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Pipeline) DeepCopyInto(out *Pipeline) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline. +func (in *Pipeline) DeepCopy() *Pipeline { + if in == nil { + return nil + } + out := new(Pipeline) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Pipeline) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineInitParameters) DeepCopyInto(out *PipelineInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineInitParameters. +func (in *PipelineInitParameters) DeepCopy() *PipelineInitParameters { + if in == nil { + return nil + } + out := new(PipelineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineList) DeepCopyInto(out *PipelineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pipeline, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList. +func (in *PipelineList) DeepCopy() *PipelineList { + if in == nil { + return nil + } + out := new(PipelineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PipelineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineObservation) DeepCopyInto(out *PipelineObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineObservation. +func (in *PipelineObservation) DeepCopy() *PipelineObservation { + if in == nil { + return nil + } + out := new(PipelineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineParameters) DeepCopyInto(out *PipelineParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameters. +func (in *PipelineParameters) DeepCopy() *PipelineParameters { + if in == nil { + return nil + } + out := new(PipelineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec. 
+func (in *PipelineSpec) DeepCopy() *PipelineSpec { + if in == nil { + return nil + } + out := new(PipelineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus. +func (in *PipelineStatus) DeepCopy() *PipelineStatus { + if in == nil { + return nil + } + out := new(PipelineStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ingest/v1alpha1/zz_generated.managed.go b/apis/ingest/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..18426e9 --- /dev/null +++ b/apis/ingest/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Pipeline. +func (mg *Pipeline) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Pipeline. +func (mg *Pipeline) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Pipeline. +func (mg *Pipeline) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Pipeline. +func (mg *Pipeline) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Pipeline. +func (mg *Pipeline) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Pipeline. 
+func (mg *Pipeline) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Pipeline. +func (mg *Pipeline) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Pipeline. +func (mg *Pipeline) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Pipeline. +func (mg *Pipeline) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Pipeline. +func (mg *Pipeline) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Pipeline. +func (mg *Pipeline) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Pipeline. +func (mg *Pipeline) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ingest/v1alpha1/zz_generated.managedlist.go b/apis/ingest/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..c654d14 --- /dev/null +++ b/apis/ingest/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PipelineList. 
+func (l *PipelineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ingest/v1alpha1/zz_groupversion_info.go b/apis/ingest/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..0d47534 --- /dev/null +++ b/apis/ingest/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ingest.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ingest.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ingest/v1alpha1/zz_pipeline_terraformed.go b/apis/ingest/v1alpha1/zz_pipeline_terraformed.go new file mode 100755 index 0000000..85252f9 --- /dev/null +++ b/apis/ingest/v1alpha1/zz_pipeline_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Pipeline +func (mg *Pipeline) GetTerraformResourceType() string { + return "opensearch_ingest_pipeline" +} + +// GetConnectionDetailsMapping for this Pipeline +func (tr *Pipeline) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Pipeline +func (tr *Pipeline) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Pipeline +func (tr *Pipeline) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Pipeline +func (tr *Pipeline) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Pipeline +func (tr *Pipeline) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Pipeline +func (tr *Pipeline) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Pipeline +func (tr *Pipeline) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Pipeline
+func (tr *Pipeline) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Pipeline using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Pipeline) LateInitialize(attrs []byte) (bool, error) {
+	params := &PipelineParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Pipeline) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ingest/v1alpha1/zz_pipeline_types.go b/apis/ingest/v1alpha1/zz_pipeline_types.go new file mode 100755 index 0000000..a5a0f85 --- /dev/null +++ b/apis/ingest/v1alpha1/zz_pipeline_types.go @@ -0,0 +1,115 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PipelineInitParameters struct { + + // (String) The JSON body of the ingest pipeline + // The JSON body of the ingest pipeline + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the ingest pipeline + // The name of the ingest pipeline + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PipelineObservation struct { + + // (String) The JSON body of the ingest pipeline + // The JSON body of the ingest pipeline + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The name of the ingest pipeline + // The name of the ingest pipeline + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PipelineParameters struct { + + // (String) The JSON body of the ingest pipeline + // The JSON body of the ingest pipeline + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the ingest pipeline + // The name of the ingest pipeline + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +// PipelineSpec defines the desired state of Pipeline +type PipelineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PipelineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PipelineInitParameters `json:"initProvider,omitempty"` +} + +// PipelineStatus defines the observed state of Pipeline. +type PipelineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PipelineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Pipeline is the Schema for the Pipelines API. Provides an OpenSearch ingest pipeline resource. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Pipeline struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec PipelineSpec `json:"spec"` + Status PipelineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PipelineList contains a list of Pipelines +type PipelineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Pipeline `json:"items"` +} + +// Repository type metadata. +var ( + Pipeline_Kind = "Pipeline" + Pipeline_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Pipeline_Kind}.String() + Pipeline_KindAPIVersion = Pipeline_Kind + "." 
+ CRDGroupVersion.String() + Pipeline_GroupVersionKind = CRDGroupVersion.WithKind(Pipeline_Kind) +) + +func init() { + SchemeBuilder.Register(&Pipeline{}, &PipelineList{}) +} diff --git a/apis/ism/v1alpha1/zz_generated.conversion_hubs.go b/apis/ism/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..246e243 --- /dev/null +++ b/apis/ism/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Policy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PolicyMapping) Hub() {} diff --git a/apis/ism/v1alpha1/zz_generated.deepcopy.go b/apis/ism/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..ee3bd60 --- /dev/null +++ b/apis/ism/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,516 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.PrimaryTerm != nil { + in, out := &in.PrimaryTerm, &out.PrimaryTerm + *out = new(float64) + **out = **in + } + if in.SeqNo != nil { + in, out := &in.SeqNo, &out.SeqNo + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. +func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { + if in == nil { + return nil + } + out := new(PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMapping) DeepCopyInto(out *PolicyMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMapping. +func (in *PolicyMapping) DeepCopy() *PolicyMapping { + if in == nil { + return nil + } + out := new(PolicyMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyMappingInitParameters) DeepCopyInto(out *PolicyMappingInitParameters) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]map[string]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + } + } + if in.Indexes != nil { + in, out := &in.Indexes, &out.Indexes + *out = new(string) + **out = **in + } + if in.IsSafe != nil { + in, out := &in.IsSafe, &out.IsSafe + *out = new(bool) + **out = **in + } + if in.ManagedIndexes != nil { + in, out := &in.ManagedIndexes, &out.ManagedIndexes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMappingInitParameters. +func (in *PolicyMappingInitParameters) DeepCopy() *PolicyMappingInitParameters { + if in == nil { + return nil + } + out := new(PolicyMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyMappingList) DeepCopyInto(out *PolicyMappingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PolicyMapping, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMappingList. +func (in *PolicyMappingList) DeepCopy() *PolicyMappingList { + if in == nil { + return nil + } + out := new(PolicyMappingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyMappingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMappingObservation) DeepCopyInto(out *PolicyMappingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]map[string]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + } + } + if in.Indexes != nil { + in, out := &in.Indexes, &out.Indexes + *out = new(string) + **out = **in + } + if in.IsSafe != nil { + in, out := &in.IsSafe, &out.IsSafe + *out = new(bool) + **out = **in + } + if in.ManagedIndexes != nil { + in, out := &in.ManagedIndexes, &out.ManagedIndexes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := 
&(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMappingObservation. +func (in *PolicyMappingObservation) DeepCopy() *PolicyMappingObservation { + if in == nil { + return nil + } + out := new(PolicyMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMappingParameters) DeepCopyInto(out *PolicyMappingParameters) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]map[string]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + } + } + if in.Indexes != nil { + in, out := &in.Indexes, &out.Indexes + *out = new(string) + **out = **in + } + if in.IsSafe != nil { + in, out := &in.IsSafe, &out.IsSafe + *out = new(bool) + **out = **in + } + if in.ManagedIndexes != nil { + in, out := &in.ManagedIndexes, &out.ManagedIndexes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new PolicyMappingParameters. +func (in *PolicyMappingParameters) DeepCopy() *PolicyMappingParameters { + if in == nil { + return nil + } + out := new(PolicyMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMappingSpec) DeepCopyInto(out *PolicyMappingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMappingSpec. +func (in *PolicyMappingSpec) DeepCopy() *PolicyMappingSpec { + if in == nil { + return nil + } + out := new(PolicyMappingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMappingStatus) DeepCopyInto(out *PolicyMappingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMappingStatus. +func (in *PolicyMappingStatus) DeepCopy() *PolicyMappingStatus { + if in == nil { + return nil + } + out := new(PolicyMappingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.PrimaryTerm != nil { + in, out := &in.PrimaryTerm, &out.PrimaryTerm + *out = new(float64) + **out = **in + } + if in.SeqNo != nil { + in, out := &in.SeqNo, &out.SeqNo + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. +func (in *PolicyObservation) DeepCopy() *PolicyObservation { + if in == nil { + return nil + } + out := new(PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.PrimaryTerm != nil { + in, out := &in.PrimaryTerm, &out.PrimaryTerm + *out = new(float64) + **out = **in + } + if in.SeqNo != nil { + in, out := &in.SeqNo, &out.SeqNo + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. +func (in *PolicyParameters) DeepCopy() *PolicyParameters { + if in == nil { + return nil + } + out := new(PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. +func (in *PolicySpec) DeepCopy() *PolicySpec { + if in == nil { + return nil + } + out := new(PolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. +func (in *PolicyStatus) DeepCopy() *PolicyStatus { + if in == nil { + return nil + } + out := new(PolicyStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ism/v1alpha1/zz_generated.managed.go b/apis/ism/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..a401d23 --- /dev/null +++ b/apis/ism/v1alpha1/zz_generated.managed.go @@ -0,0 +1,125 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Policy. +func (mg *Policy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Policy. +func (mg *Policy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Policy. +func (mg *Policy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Policy. 
+func (mg *Policy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Policy. +func (mg *Policy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Policy. +func (mg *Policy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Policy. +func (mg *Policy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Policy. +func (mg *Policy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this PolicyMapping. +func (mg *PolicyMapping) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PolicyMapping. +func (mg *PolicyMapping) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PolicyMapping. +func (mg *PolicyMapping) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PolicyMapping. 
+func (mg *PolicyMapping) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PolicyMapping. +func (mg *PolicyMapping) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PolicyMapping. +func (mg *PolicyMapping) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PolicyMapping. +func (mg *PolicyMapping) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PolicyMapping. +func (mg *PolicyMapping) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PolicyMapping. +func (mg *PolicyMapping) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PolicyMapping. +func (mg *PolicyMapping) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PolicyMapping. +func (mg *PolicyMapping) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PolicyMapping. +func (mg *PolicyMapping) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ism/v1alpha1/zz_generated.managedlist.go b/apis/ism/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..af03b4b --- /dev/null +++ b/apis/ism/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,23 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PolicyList. 
+func (l *PolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PolicyMappingList. +func (l *PolicyMappingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ism/v1alpha1/zz_groupversion_info.go b/apis/ism/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..ef6fd0b --- /dev/null +++ b/apis/ism/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ism.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ism.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ism/v1alpha1/zz_policy_terraformed.go b/apis/ism/v1alpha1/zz_policy_terraformed.go new file mode 100755 index 0000000..b254de6 --- /dev/null +++ b/apis/ism/v1alpha1/zz_policy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Policy +func (mg *Policy) GetTerraformResourceType() string { + return "opensearch_ism_policy" +} + +// GetConnectionDetailsMapping for this Policy +func (tr *Policy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Policy +func (tr *Policy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Policy +func (tr *Policy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Policy +func (tr *Policy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Policy +func (tr *Policy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Policy +func (tr *Policy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Policy +func (tr *Policy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters 
of this Policy +func (tr *Policy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Policy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Policy) LateInitialize(attrs []byte) (bool, error) { + params := &PolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Policy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ism/v1alpha1/zz_policy_types.go b/apis/ism/v1alpha1/zz_policy_types.go new file mode 100755 index 0000000..6a19431 --- /dev/null +++ b/apis/ism/v1alpha1/zz_policy_types.go @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PolicyInitParameters struct { + + // (String) The policy document. + // The policy document. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The id of the ISM policy. + // The id of the ISM policy. + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + + // (Number) The primary term of the ISM policy version. + // The primary term of the ISM policy version. + PrimaryTerm *float64 `json:"primaryTerm,omitempty" tf:"primary_term,omitempty"` + + // (Number) The sequence number of the ISM policy version. + // The sequence number of the ISM policy version. + SeqNo *float64 `json:"seqNo,omitempty" tf:"seq_no,omitempty"` +} + +type PolicyObservation struct { + + // (String) The policy document. + // The policy document. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The id of the ISM policy. + // The id of the ISM policy. + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + + // (Number) The primary term of the ISM policy version. + // The primary term of the ISM policy version. 
+ PrimaryTerm *float64 `json:"primaryTerm,omitempty" tf:"primary_term,omitempty"` + + // (Number) The sequence number of the ISM policy version. + // The sequence number of the ISM policy version. + SeqNo *float64 `json:"seqNo,omitempty" tf:"seq_no,omitempty"` +} + +type PolicyParameters struct { + + // (String) The policy document. + // The policy document. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The id of the ISM policy. + // The id of the ISM policy. + // +kubebuilder:validation:Optional + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + + // (Number) The primary term of the ISM policy version. + // The primary term of the ISM policy version. + // +kubebuilder:validation:Optional + PrimaryTerm *float64 `json:"primaryTerm,omitempty" tf:"primary_term,omitempty"` + + // (Number) The sequence number of the ISM policy version. + // The sequence number of the ISM policy version. + // +kubebuilder:validation:Optional + SeqNo *float64 `json:"seqNo,omitempty" tf:"seq_no,omitempty"` +} + +// PolicySpec defines the desired state of Policy +type PolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider PolicyInitParameters `json:"initProvider,omitempty"` +} + +// PolicyStatus defines the observed state of Policy. +type PolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Policy is the Schema for the Policys API. Provides an OpenSearch Index State Management (ISM) policy. Please refer to the OpenSearch ISM documentation for details. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Policy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyId) || (has(self.initProvider) && has(self.initProvider.policyId))",message="spec.forProvider.policyId is a required parameter" + Spec PolicySpec `json:"spec"` + Status PolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PolicyList contains a list of Policys +type PolicyList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Policy `json:"items"` +} + +// Repository type metadata. +var ( + Policy_Kind = "Policy" + Policy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Policy_Kind}.String() + Policy_KindAPIVersion = Policy_Kind + "." + CRDGroupVersion.String() + Policy_GroupVersionKind = CRDGroupVersion.WithKind(Policy_Kind) +) + +func init() { + SchemeBuilder.Register(&Policy{}, &PolicyList{}) +} diff --git a/apis/ism/v1alpha1/zz_policymapping_terraformed.go b/apis/ism/v1alpha1/zz_policymapping_terraformed.go new file mode 100755 index 0000000..151e721 --- /dev/null +++ b/apis/ism/v1alpha1/zz_policymapping_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PolicyMapping +func (mg *PolicyMapping) GetTerraformResourceType() string { + return "opensearch_ism_policy_mapping" +} + +// GetConnectionDetailsMapping for this PolicyMapping +func (tr *PolicyMapping) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PolicyMapping +func (tr *PolicyMapping) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PolicyMapping +func (tr *PolicyMapping) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PolicyMapping +func (tr 
*PolicyMapping) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PolicyMapping +func (tr *PolicyMapping) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PolicyMapping +func (tr *PolicyMapping) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PolicyMapping +func (tr *PolicyMapping) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this PolicyMapping +func (tr *PolicyMapping) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PolicyMapping using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *PolicyMapping) LateInitialize(attrs []byte) (bool, error) { + params := &PolicyMappingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PolicyMapping) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ism/v1alpha1/zz_policymapping_types.go b/apis/ism/v1alpha1/zz_policymapping_types.go new file mode 100755 index 0000000..0c72ffe --- /dev/null +++ b/apis/ism/v1alpha1/zz_policymapping_types.go @@ -0,0 +1,164 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PolicyMappingInitParameters struct { + + // (Set of Map of String) When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified.
+ // When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + Include []map[string]*string `json:"include,omitempty" tf:"include,omitempty"` + + // (String) Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. + // Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. + Indexes *string `json:"indexes,omitempty" tf:"indexes,omitempty"` + + // (Boolean) + IsSafe *bool `json:"isSafe,omitempty" tf:"is_safe,omitempty"` + + // (Set of String) + // +listType=set + ManagedIndexes []*string `json:"managedIndexes,omitempty" tf:"managed_indexes,omitempty"` + + // (String) The name of the policy. + // The name of the policy. + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + + // (String) After a change in policy takes place, specify the state for the index to transition to + // After a change in policy takes place, specify the state for the index to transition to + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type PolicyMappingObservation struct { + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (Set of Map of String) When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + // When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + Include []map[string]*string `json:"include,omitempty" tf:"include,omitempty"` + + // (String) Name of the index to apply the policy to. 
You can use an index pattern to update multiple indices at once. + // Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. + Indexes *string `json:"indexes,omitempty" tf:"indexes,omitempty"` + + // (Boolean) + IsSafe *bool `json:"isSafe,omitempty" tf:"is_safe,omitempty"` + + // (Set of String) + // +listType=set + ManagedIndexes []*string `json:"managedIndexes,omitempty" tf:"managed_indexes,omitempty"` + + // (String) The name of the policy. + // The name of the policy. + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + + // (String) After a change in policy takes place, specify the state for the index to transition to + // After a change in policy takes place, specify the state for the index to transition to + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type PolicyMappingParameters struct { + + // (Set of Map of String) When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + // When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + // +kubebuilder:validation:Optional + Include []map[string]*string `json:"include,omitempty" tf:"include,omitempty"` + + // (String) Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. + // Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. 
+ // +kubebuilder:validation:Optional + Indexes *string `json:"indexes,omitempty" tf:"indexes,omitempty"` + + // (Boolean) + // +kubebuilder:validation:Optional + IsSafe *bool `json:"isSafe,omitempty" tf:"is_safe,omitempty"` + + // (Set of String) + // +kubebuilder:validation:Optional + // +listType=set + ManagedIndexes []*string `json:"managedIndexes,omitempty" tf:"managed_indexes,omitempty"` + + // (String) The name of the policy. + // The name of the policy. + // +kubebuilder:validation:Optional + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + + // (String) After a change in policy takes place, specify the state for the index to transition to + // After a change in policy takes place, specify the state for the index to transition to + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +// PolicyMappingSpec defines the desired state of PolicyMapping +type PolicyMappingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PolicyMappingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PolicyMappingInitParameters `json:"initProvider,omitempty"` +} + +// PolicyMappingStatus defines the observed state of PolicyMapping. 
+type PolicyMappingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PolicyMappingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// PolicyMapping is the Schema for the PolicyMappings API. Provides an OpenSearch Index State Management (ISM) policy. Please refer to the OpenSearch ISM documentation for details. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type PolicyMapping struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.indexes) || (has(self.initProvider) && has(self.initProvider.indexes))",message="spec.forProvider.indexes is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyId) || (has(self.initProvider) && has(self.initProvider.policyId))",message="spec.forProvider.policyId is a required parameter" + Spec PolicyMappingSpec `json:"spec"` + Status PolicyMappingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PolicyMappingList contains a list of PolicyMappings +type PolicyMappingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta 
`json:"metadata,omitempty"` + Items []PolicyMapping `json:"items"` +} + +// Repository type metadata. +var ( + PolicyMapping_Kind = "PolicyMapping" + PolicyMapping_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PolicyMapping_Kind}.String() + PolicyMapping_KindAPIVersion = PolicyMapping_Kind + "." + CRDGroupVersion.String() + PolicyMapping_GroupVersionKind = CRDGroupVersion.WithKind(PolicyMapping_Kind) +) + +func init() { + SchemeBuilder.Register(&PolicyMapping{}, &PolicyMappingList{}) +} diff --git a/apis/opensearch/v1alpha1/zz_generated.conversion_hubs.go b/apis/opensearch/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..b34eb53 --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Index) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Monitor) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Role) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Script) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *User) Hub() {} diff --git a/apis/opensearch/v1alpha1/zz_generated.deepcopy.go b/apis/opensearch/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..6425b7b --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,2249 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Index) DeepCopyInto(out *Index) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Index. +func (in *Index) DeepCopy() *Index { + if in == nil { + return nil + } + out := new(Index) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Index) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexInitParameters) DeepCopyInto(out *IndexInitParameters) { + *out = *in + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = new(string) + **out = **in + } + if in.AnalysisAnalyzer != nil { + in, out := &in.AnalysisAnalyzer, &out.AnalysisAnalyzer + *out = new(string) + **out = **in + } + if in.AnalysisCharFilter != nil { + in, out := &in.AnalysisCharFilter, &out.AnalysisCharFilter + *out = new(string) + **out = **in + } + if in.AnalysisFilter != nil { + in, out := &in.AnalysisFilter, &out.AnalysisFilter + *out = new(string) + **out = **in + } + if in.AnalysisNormalizer != nil { + in, out := &in.AnalysisNormalizer, &out.AnalysisNormalizer + *out = new(string) + **out = **in + } + if in.AnalysisTokenizer != nil { + in, out := &in.AnalysisTokenizer, &out.AnalysisTokenizer + *out = new(string) + **out = **in + } + if in.AnalyzeMaxTokenCount != nil { + in, out := &in.AnalyzeMaxTokenCount, &out.AnalyzeMaxTokenCount + *out = new(string) + **out = **in + } + if in.AutoExpandReplicas != nil { + in, out := &in.AutoExpandReplicas, &out.AutoExpandReplicas + *out = new(string) + **out = **in + } + if in.BlocksMetadata != nil { + in, out := &in.BlocksMetadata, 
&out.BlocksMetadata + *out = new(bool) + **out = **in + } + if in.BlocksRead != nil { + in, out := &in.BlocksRead, &out.BlocksRead + *out = new(bool) + **out = **in + } + if in.BlocksReadOnly != nil { + in, out := &in.BlocksReadOnly, &out.BlocksReadOnly + *out = new(bool) + **out = **in + } + if in.BlocksReadOnlyAllowDelete != nil { + in, out := &in.BlocksReadOnlyAllowDelete, &out.BlocksReadOnlyAllowDelete + *out = new(bool) + **out = **in + } + if in.BlocksWrite != nil { + in, out := &in.BlocksWrite, &out.BlocksWrite + *out = new(bool) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.DefaultPipeline != nil { + in, out := &in.DefaultPipeline, &out.DefaultPipeline + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.GcDeletes != nil { + in, out := &in.GcDeletes, &out.GcDeletes + *out = new(string) + **out = **in + } + if in.HighlightMaxAnalyzedOffset != nil { + in, out := &in.HighlightMaxAnalyzedOffset, &out.HighlightMaxAnalyzedOffset + *out = new(string) + **out = **in + } + if in.IncludeTypeName != nil { + in, out := &in.IncludeTypeName, &out.IncludeTypeName + *out = new(string) + **out = **in + } + if in.IndexKnn != nil { + in, out := &in.IndexKnn, &out.IndexKnn + *out = new(bool) + **out = **in + } + if in.IndexKnnAlgoParamEfSearch != nil { + in, out := &in.IndexKnnAlgoParamEfSearch, &out.IndexKnnAlgoParamEfSearch + *out = new(string) + **out = **in + } + if in.IndexSimilarityDefault != nil { + in, out := &in.IndexSimilarityDefault, &out.IndexSimilarityDefault + *out = new(string) + **out = **in + } + if in.IndexingSlowlogLevel != nil { + in, out := &in.IndexingSlowlogLevel, &out.IndexingSlowlogLevel + *out = new(string) + **out = **in + } + if in.IndexingSlowlogSource != nil { + in, out := &in.IndexingSlowlogSource, &out.IndexingSlowlogSource + *out = new(string) + **out = 
**in + } + if in.IndexingSlowlogThresholdIndexDebug != nil { + in, out := &in.IndexingSlowlogThresholdIndexDebug, &out.IndexingSlowlogThresholdIndexDebug + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexInfo != nil { + in, out := &in.IndexingSlowlogThresholdIndexInfo, &out.IndexingSlowlogThresholdIndexInfo + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexTrace != nil { + in, out := &in.IndexingSlowlogThresholdIndexTrace, &out.IndexingSlowlogThresholdIndexTrace + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexWarn != nil { + in, out := &in.IndexingSlowlogThresholdIndexWarn, &out.IndexingSlowlogThresholdIndexWarn + *out = new(string) + **out = **in + } + if in.LoadFixedBitsetFiltersEagerly != nil { + in, out := &in.LoadFixedBitsetFiltersEagerly, &out.LoadFixedBitsetFiltersEagerly + *out = new(bool) + **out = **in + } + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = new(string) + **out = **in + } + if in.MaxDocvalueFieldsSearch != nil { + in, out := &in.MaxDocvalueFieldsSearch, &out.MaxDocvalueFieldsSearch + *out = new(string) + **out = **in + } + if in.MaxInnerResultWindow != nil { + in, out := &in.MaxInnerResultWindow, &out.MaxInnerResultWindow + *out = new(string) + **out = **in + } + if in.MaxNgramDiff != nil { + in, out := &in.MaxNgramDiff, &out.MaxNgramDiff + *out = new(string) + **out = **in + } + if in.MaxRefreshListeners != nil { + in, out := &in.MaxRefreshListeners, &out.MaxRefreshListeners + *out = new(string) + **out = **in + } + if in.MaxRegexLength != nil { + in, out := &in.MaxRegexLength, &out.MaxRegexLength + *out = new(string) + **out = **in + } + if in.MaxRescoreWindow != nil { + in, out := &in.MaxRescoreWindow, &out.MaxRescoreWindow + *out = new(string) + **out = **in + } + if in.MaxResultWindow != nil { + in, out := &in.MaxResultWindow, &out.MaxResultWindow + *out = new(string) + **out = **in + } + if in.MaxScriptFields != nil { + in, out 
:= &in.MaxScriptFields, &out.MaxScriptFields + *out = new(string) + **out = **in + } + if in.MaxShingleDiff != nil { + in, out := &in.MaxShingleDiff, &out.MaxShingleDiff + *out = new(string) + **out = **in + } + if in.MaxTermsCount != nil { + in, out := &in.MaxTermsCount, &out.MaxTermsCount + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NumberOfReplicas != nil { + in, out := &in.NumberOfReplicas, &out.NumberOfReplicas + *out = new(string) + **out = **in + } + if in.NumberOfRoutingShards != nil { + in, out := &in.NumberOfRoutingShards, &out.NumberOfRoutingShards + *out = new(string) + **out = **in + } + if in.NumberOfShards != nil { + in, out := &in.NumberOfShards, &out.NumberOfShards + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(string) + **out = **in + } + if in.RolloverAlias != nil { + in, out := &in.RolloverAlias, &out.RolloverAlias + *out = new(string) + **out = **in + } + if in.RoutingAllocationEnable != nil { + in, out := &in.RoutingAllocationEnable, &out.RoutingAllocationEnable + *out = new(string) + **out = **in + } + if in.RoutingPartitionSize != nil { + in, out := &in.RoutingPartitionSize, &out.RoutingPartitionSize + *out = new(string) + **out = **in + } + if in.RoutingRebalanceEnable != nil { + in, out := &in.RoutingRebalanceEnable, &out.RoutingRebalanceEnable + *out = new(string) + **out = **in + } + if in.SearchIdleAfter != nil { + in, out := &in.SearchIdleAfter, &out.SearchIdleAfter + *out = new(string) + **out = **in + } + if in.SearchSlowlogLevel != nil { + in, out := &in.SearchSlowlogLevel, &out.SearchSlowlogLevel + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchDebug != nil { + in, out := &in.SearchSlowlogThresholdFetchDebug, &out.SearchSlowlogThresholdFetchDebug + *out = new(string) + **out = **in + } + if 
in.SearchSlowlogThresholdFetchInfo != nil { + in, out := &in.SearchSlowlogThresholdFetchInfo, &out.SearchSlowlogThresholdFetchInfo + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchTrace != nil { + in, out := &in.SearchSlowlogThresholdFetchTrace, &out.SearchSlowlogThresholdFetchTrace + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchWarn != nil { + in, out := &in.SearchSlowlogThresholdFetchWarn, &out.SearchSlowlogThresholdFetchWarn + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryDebug != nil { + in, out := &in.SearchSlowlogThresholdQueryDebug, &out.SearchSlowlogThresholdQueryDebug + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryInfo != nil { + in, out := &in.SearchSlowlogThresholdQueryInfo, &out.SearchSlowlogThresholdQueryInfo + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryTrace != nil { + in, out := &in.SearchSlowlogThresholdQueryTrace, &out.SearchSlowlogThresholdQueryTrace + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryWarn != nil { + in, out := &in.SearchSlowlogThresholdQueryWarn, &out.SearchSlowlogThresholdQueryWarn + *out = new(string) + **out = **in + } + if in.ShardCheckOnStartup != nil { + in, out := &in.ShardCheckOnStartup, &out.ShardCheckOnStartup + *out = new(string) + **out = **in + } + if in.SortField != nil { + in, out := &in.SortField, &out.SortField + *out = new(string) + **out = **in + } + if in.SortOrder != nil { + in, out := &in.SortOrder, &out.SortOrder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexInitParameters. +func (in *IndexInitParameters) DeepCopy() *IndexInitParameters { + if in == nil { + return nil + } + out := new(IndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IndexList) DeepCopyInto(out *IndexList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Index, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexList. +func (in *IndexList) DeepCopy() *IndexList { + if in == nil { + return nil + } + out := new(IndexList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IndexList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexObservation) DeepCopyInto(out *IndexObservation) { + *out = *in + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = new(string) + **out = **in + } + if in.AnalysisAnalyzer != nil { + in, out := &in.AnalysisAnalyzer, &out.AnalysisAnalyzer + *out = new(string) + **out = **in + } + if in.AnalysisCharFilter != nil { + in, out := &in.AnalysisCharFilter, &out.AnalysisCharFilter + *out = new(string) + **out = **in + } + if in.AnalysisFilter != nil { + in, out := &in.AnalysisFilter, &out.AnalysisFilter + *out = new(string) + **out = **in + } + if in.AnalysisNormalizer != nil { + in, out := &in.AnalysisNormalizer, &out.AnalysisNormalizer + *out = new(string) + **out = **in + } + if in.AnalysisTokenizer != nil { + in, out := &in.AnalysisTokenizer, &out.AnalysisTokenizer + *out = new(string) + **out = **in + } + if in.AnalyzeMaxTokenCount != nil { + in, out := &in.AnalyzeMaxTokenCount, &out.AnalyzeMaxTokenCount + *out = new(string) + **out = **in + } + if in.AutoExpandReplicas != nil { + in, out := &in.AutoExpandReplicas, 
&out.AutoExpandReplicas + *out = new(string) + **out = **in + } + if in.BlocksMetadata != nil { + in, out := &in.BlocksMetadata, &out.BlocksMetadata + *out = new(bool) + **out = **in + } + if in.BlocksRead != nil { + in, out := &in.BlocksRead, &out.BlocksRead + *out = new(bool) + **out = **in + } + if in.BlocksReadOnly != nil { + in, out := &in.BlocksReadOnly, &out.BlocksReadOnly + *out = new(bool) + **out = **in + } + if in.BlocksReadOnlyAllowDelete != nil { + in, out := &in.BlocksReadOnlyAllowDelete, &out.BlocksReadOnlyAllowDelete + *out = new(bool) + **out = **in + } + if in.BlocksWrite != nil { + in, out := &in.BlocksWrite, &out.BlocksWrite + *out = new(bool) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.DefaultPipeline != nil { + in, out := &in.DefaultPipeline, &out.DefaultPipeline + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.GcDeletes != nil { + in, out := &in.GcDeletes, &out.GcDeletes + *out = new(string) + **out = **in + } + if in.HighlightMaxAnalyzedOffset != nil { + in, out := &in.HighlightMaxAnalyzedOffset, &out.HighlightMaxAnalyzedOffset + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IncludeTypeName != nil { + in, out := &in.IncludeTypeName, &out.IncludeTypeName + *out = new(string) + **out = **in + } + if in.IndexKnn != nil { + in, out := &in.IndexKnn, &out.IndexKnn + *out = new(bool) + **out = **in + } + if in.IndexKnnAlgoParamEfSearch != nil { + in, out := &in.IndexKnnAlgoParamEfSearch, &out.IndexKnnAlgoParamEfSearch + *out = new(string) + **out = **in + } + if in.IndexSimilarityDefault != nil { + in, out := &in.IndexSimilarityDefault, &out.IndexSimilarityDefault + *out = new(string) + **out = **in + } + if in.IndexingSlowlogLevel != nil { + in, out := 
&in.IndexingSlowlogLevel, &out.IndexingSlowlogLevel + *out = new(string) + **out = **in + } + if in.IndexingSlowlogSource != nil { + in, out := &in.IndexingSlowlogSource, &out.IndexingSlowlogSource + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexDebug != nil { + in, out := &in.IndexingSlowlogThresholdIndexDebug, &out.IndexingSlowlogThresholdIndexDebug + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexInfo != nil { + in, out := &in.IndexingSlowlogThresholdIndexInfo, &out.IndexingSlowlogThresholdIndexInfo + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexTrace != nil { + in, out := &in.IndexingSlowlogThresholdIndexTrace, &out.IndexingSlowlogThresholdIndexTrace + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexWarn != nil { + in, out := &in.IndexingSlowlogThresholdIndexWarn, &out.IndexingSlowlogThresholdIndexWarn + *out = new(string) + **out = **in + } + if in.LoadFixedBitsetFiltersEagerly != nil { + in, out := &in.LoadFixedBitsetFiltersEagerly, &out.LoadFixedBitsetFiltersEagerly + *out = new(bool) + **out = **in + } + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = new(string) + **out = **in + } + if in.MaxDocvalueFieldsSearch != nil { + in, out := &in.MaxDocvalueFieldsSearch, &out.MaxDocvalueFieldsSearch + *out = new(string) + **out = **in + } + if in.MaxInnerResultWindow != nil { + in, out := &in.MaxInnerResultWindow, &out.MaxInnerResultWindow + *out = new(string) + **out = **in + } + if in.MaxNgramDiff != nil { + in, out := &in.MaxNgramDiff, &out.MaxNgramDiff + *out = new(string) + **out = **in + } + if in.MaxRefreshListeners != nil { + in, out := &in.MaxRefreshListeners, &out.MaxRefreshListeners + *out = new(string) + **out = **in + } + if in.MaxRegexLength != nil { + in, out := &in.MaxRegexLength, &out.MaxRegexLength + *out = new(string) + **out = **in + } + if in.MaxRescoreWindow != nil { + in, out := &in.MaxRescoreWindow, 
&out.MaxRescoreWindow + *out = new(string) + **out = **in + } + if in.MaxResultWindow != nil { + in, out := &in.MaxResultWindow, &out.MaxResultWindow + *out = new(string) + **out = **in + } + if in.MaxScriptFields != nil { + in, out := &in.MaxScriptFields, &out.MaxScriptFields + *out = new(string) + **out = **in + } + if in.MaxShingleDiff != nil { + in, out := &in.MaxShingleDiff, &out.MaxShingleDiff + *out = new(string) + **out = **in + } + if in.MaxTermsCount != nil { + in, out := &in.MaxTermsCount, &out.MaxTermsCount + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NumberOfReplicas != nil { + in, out := &in.NumberOfReplicas, &out.NumberOfReplicas + *out = new(string) + **out = **in + } + if in.NumberOfRoutingShards != nil { + in, out := &in.NumberOfRoutingShards, &out.NumberOfRoutingShards + *out = new(string) + **out = **in + } + if in.NumberOfShards != nil { + in, out := &in.NumberOfShards, &out.NumberOfShards + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(string) + **out = **in + } + if in.RolloverAlias != nil { + in, out := &in.RolloverAlias, &out.RolloverAlias + *out = new(string) + **out = **in + } + if in.RoutingAllocationEnable != nil { + in, out := &in.RoutingAllocationEnable, &out.RoutingAllocationEnable + *out = new(string) + **out = **in + } + if in.RoutingPartitionSize != nil { + in, out := &in.RoutingPartitionSize, &out.RoutingPartitionSize + *out = new(string) + **out = **in + } + if in.RoutingRebalanceEnable != nil { + in, out := &in.RoutingRebalanceEnable, &out.RoutingRebalanceEnable + *out = new(string) + **out = **in + } + if in.SearchIdleAfter != nil { + in, out := &in.SearchIdleAfter, &out.SearchIdleAfter + *out = new(string) + **out = **in + } + if in.SearchSlowlogLevel != nil { + in, out := &in.SearchSlowlogLevel, &out.SearchSlowlogLevel + *out = 
new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchDebug != nil { + in, out := &in.SearchSlowlogThresholdFetchDebug, &out.SearchSlowlogThresholdFetchDebug + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchInfo != nil { + in, out := &in.SearchSlowlogThresholdFetchInfo, &out.SearchSlowlogThresholdFetchInfo + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchTrace != nil { + in, out := &in.SearchSlowlogThresholdFetchTrace, &out.SearchSlowlogThresholdFetchTrace + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchWarn != nil { + in, out := &in.SearchSlowlogThresholdFetchWarn, &out.SearchSlowlogThresholdFetchWarn + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryDebug != nil { + in, out := &in.SearchSlowlogThresholdQueryDebug, &out.SearchSlowlogThresholdQueryDebug + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryInfo != nil { + in, out := &in.SearchSlowlogThresholdQueryInfo, &out.SearchSlowlogThresholdQueryInfo + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryTrace != nil { + in, out := &in.SearchSlowlogThresholdQueryTrace, &out.SearchSlowlogThresholdQueryTrace + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryWarn != nil { + in, out := &in.SearchSlowlogThresholdQueryWarn, &out.SearchSlowlogThresholdQueryWarn + *out = new(string) + **out = **in + } + if in.ShardCheckOnStartup != nil { + in, out := &in.ShardCheckOnStartup, &out.ShardCheckOnStartup + *out = new(string) + **out = **in + } + if in.SortField != nil { + in, out := &in.SortField, &out.SortField + *out = new(string) + **out = **in + } + if in.SortOrder != nil { + in, out := &in.SortOrder, &out.SortOrder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexObservation. 
+func (in *IndexObservation) DeepCopy() *IndexObservation { + if in == nil { + return nil + } + out := new(IndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexParameters) DeepCopyInto(out *IndexParameters) { + *out = *in + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = new(string) + **out = **in + } + if in.AnalysisAnalyzer != nil { + in, out := &in.AnalysisAnalyzer, &out.AnalysisAnalyzer + *out = new(string) + **out = **in + } + if in.AnalysisCharFilter != nil { + in, out := &in.AnalysisCharFilter, &out.AnalysisCharFilter + *out = new(string) + **out = **in + } + if in.AnalysisFilter != nil { + in, out := &in.AnalysisFilter, &out.AnalysisFilter + *out = new(string) + **out = **in + } + if in.AnalysisNormalizer != nil { + in, out := &in.AnalysisNormalizer, &out.AnalysisNormalizer + *out = new(string) + **out = **in + } + if in.AnalysisTokenizer != nil { + in, out := &in.AnalysisTokenizer, &out.AnalysisTokenizer + *out = new(string) + **out = **in + } + if in.AnalyzeMaxTokenCount != nil { + in, out := &in.AnalyzeMaxTokenCount, &out.AnalyzeMaxTokenCount + *out = new(string) + **out = **in + } + if in.AutoExpandReplicas != nil { + in, out := &in.AutoExpandReplicas, &out.AutoExpandReplicas + *out = new(string) + **out = **in + } + if in.BlocksMetadata != nil { + in, out := &in.BlocksMetadata, &out.BlocksMetadata + *out = new(bool) + **out = **in + } + if in.BlocksRead != nil { + in, out := &in.BlocksRead, &out.BlocksRead + *out = new(bool) + **out = **in + } + if in.BlocksReadOnly != nil { + in, out := &in.BlocksReadOnly, &out.BlocksReadOnly + *out = new(bool) + **out = **in + } + if in.BlocksReadOnlyAllowDelete != nil { + in, out := &in.BlocksReadOnlyAllowDelete, &out.BlocksReadOnlyAllowDelete + *out = new(bool) + **out = **in + } + if in.BlocksWrite != nil { + in, out := &in.BlocksWrite, &out.BlocksWrite + 
*out = new(bool) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.DefaultPipeline != nil { + in, out := &in.DefaultPipeline, &out.DefaultPipeline + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.GcDeletes != nil { + in, out := &in.GcDeletes, &out.GcDeletes + *out = new(string) + **out = **in + } + if in.HighlightMaxAnalyzedOffset != nil { + in, out := &in.HighlightMaxAnalyzedOffset, &out.HighlightMaxAnalyzedOffset + *out = new(string) + **out = **in + } + if in.IncludeTypeName != nil { + in, out := &in.IncludeTypeName, &out.IncludeTypeName + *out = new(string) + **out = **in + } + if in.IndexKnn != nil { + in, out := &in.IndexKnn, &out.IndexKnn + *out = new(bool) + **out = **in + } + if in.IndexKnnAlgoParamEfSearch != nil { + in, out := &in.IndexKnnAlgoParamEfSearch, &out.IndexKnnAlgoParamEfSearch + *out = new(string) + **out = **in + } + if in.IndexSimilarityDefault != nil { + in, out := &in.IndexSimilarityDefault, &out.IndexSimilarityDefault + *out = new(string) + **out = **in + } + if in.IndexingSlowlogLevel != nil { + in, out := &in.IndexingSlowlogLevel, &out.IndexingSlowlogLevel + *out = new(string) + **out = **in + } + if in.IndexingSlowlogSource != nil { + in, out := &in.IndexingSlowlogSource, &out.IndexingSlowlogSource + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexDebug != nil { + in, out := &in.IndexingSlowlogThresholdIndexDebug, &out.IndexingSlowlogThresholdIndexDebug + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexInfo != nil { + in, out := &in.IndexingSlowlogThresholdIndexInfo, &out.IndexingSlowlogThresholdIndexInfo + *out = new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexTrace != nil { + in, out := &in.IndexingSlowlogThresholdIndexTrace, &out.IndexingSlowlogThresholdIndexTrace + *out = 
new(string) + **out = **in + } + if in.IndexingSlowlogThresholdIndexWarn != nil { + in, out := &in.IndexingSlowlogThresholdIndexWarn, &out.IndexingSlowlogThresholdIndexWarn + *out = new(string) + **out = **in + } + if in.LoadFixedBitsetFiltersEagerly != nil { + in, out := &in.LoadFixedBitsetFiltersEagerly, &out.LoadFixedBitsetFiltersEagerly + *out = new(bool) + **out = **in + } + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = new(string) + **out = **in + } + if in.MaxDocvalueFieldsSearch != nil { + in, out := &in.MaxDocvalueFieldsSearch, &out.MaxDocvalueFieldsSearch + *out = new(string) + **out = **in + } + if in.MaxInnerResultWindow != nil { + in, out := &in.MaxInnerResultWindow, &out.MaxInnerResultWindow + *out = new(string) + **out = **in + } + if in.MaxNgramDiff != nil { + in, out := &in.MaxNgramDiff, &out.MaxNgramDiff + *out = new(string) + **out = **in + } + if in.MaxRefreshListeners != nil { + in, out := &in.MaxRefreshListeners, &out.MaxRefreshListeners + *out = new(string) + **out = **in + } + if in.MaxRegexLength != nil { + in, out := &in.MaxRegexLength, &out.MaxRegexLength + *out = new(string) + **out = **in + } + if in.MaxRescoreWindow != nil { + in, out := &in.MaxRescoreWindow, &out.MaxRescoreWindow + *out = new(string) + **out = **in + } + if in.MaxResultWindow != nil { + in, out := &in.MaxResultWindow, &out.MaxResultWindow + *out = new(string) + **out = **in + } + if in.MaxScriptFields != nil { + in, out := &in.MaxScriptFields, &out.MaxScriptFields + *out = new(string) + **out = **in + } + if in.MaxShingleDiff != nil { + in, out := &in.MaxShingleDiff, &out.MaxShingleDiff + *out = new(string) + **out = **in + } + if in.MaxTermsCount != nil { + in, out := &in.MaxTermsCount, &out.MaxTermsCount + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NumberOfReplicas != nil { + in, out := &in.NumberOfReplicas, &out.NumberOfReplicas + *out = 
new(string) + **out = **in + } + if in.NumberOfRoutingShards != nil { + in, out := &in.NumberOfRoutingShards, &out.NumberOfRoutingShards + *out = new(string) + **out = **in + } + if in.NumberOfShards != nil { + in, out := &in.NumberOfShards, &out.NumberOfShards + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(string) + **out = **in + } + if in.RolloverAlias != nil { + in, out := &in.RolloverAlias, &out.RolloverAlias + *out = new(string) + **out = **in + } + if in.RoutingAllocationEnable != nil { + in, out := &in.RoutingAllocationEnable, &out.RoutingAllocationEnable + *out = new(string) + **out = **in + } + if in.RoutingPartitionSize != nil { + in, out := &in.RoutingPartitionSize, &out.RoutingPartitionSize + *out = new(string) + **out = **in + } + if in.RoutingRebalanceEnable != nil { + in, out := &in.RoutingRebalanceEnable, &out.RoutingRebalanceEnable + *out = new(string) + **out = **in + } + if in.SearchIdleAfter != nil { + in, out := &in.SearchIdleAfter, &out.SearchIdleAfter + *out = new(string) + **out = **in + } + if in.SearchSlowlogLevel != nil { + in, out := &in.SearchSlowlogLevel, &out.SearchSlowlogLevel + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchDebug != nil { + in, out := &in.SearchSlowlogThresholdFetchDebug, &out.SearchSlowlogThresholdFetchDebug + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchInfo != nil { + in, out := &in.SearchSlowlogThresholdFetchInfo, &out.SearchSlowlogThresholdFetchInfo + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchTrace != nil { + in, out := &in.SearchSlowlogThresholdFetchTrace, &out.SearchSlowlogThresholdFetchTrace + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdFetchWarn != nil { + in, out := &in.SearchSlowlogThresholdFetchWarn, &out.SearchSlowlogThresholdFetchWarn + *out = new(string) + **out = **in + } + if 
in.SearchSlowlogThresholdQueryDebug != nil { + in, out := &in.SearchSlowlogThresholdQueryDebug, &out.SearchSlowlogThresholdQueryDebug + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryInfo != nil { + in, out := &in.SearchSlowlogThresholdQueryInfo, &out.SearchSlowlogThresholdQueryInfo + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryTrace != nil { + in, out := &in.SearchSlowlogThresholdQueryTrace, &out.SearchSlowlogThresholdQueryTrace + *out = new(string) + **out = **in + } + if in.SearchSlowlogThresholdQueryWarn != nil { + in, out := &in.SearchSlowlogThresholdQueryWarn, &out.SearchSlowlogThresholdQueryWarn + *out = new(string) + **out = **in + } + if in.ShardCheckOnStartup != nil { + in, out := &in.ShardCheckOnStartup, &out.ShardCheckOnStartup + *out = new(string) + **out = **in + } + if in.SortField != nil { + in, out := &in.SortField, &out.SortField + *out = new(string) + **out = **in + } + if in.SortOrder != nil { + in, out := &in.SortOrder, &out.SortOrder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexParameters. +func (in *IndexParameters) DeepCopy() *IndexParameters { + if in == nil { + return nil + } + out := new(IndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexPermissionsInitParameters) DeepCopyInto(out *IndexPermissionsInitParameters) { + *out = *in + if in.AllowedActions != nil { + in, out := &in.AllowedActions, &out.AllowedActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DocumentLevelSecurity != nil { + in, out := &in.DocumentLevelSecurity, &out.DocumentLevelSecurity + *out = new(string) + **out = **in + } + if in.FieldLevelSecurity != nil { + in, out := &in.FieldLevelSecurity, &out.FieldLevelSecurity + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IndexPatterns != nil { + in, out := &in.IndexPatterns, &out.IndexPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaskedFields != nil { + in, out := &in.MaskedFields, &out.MaskedFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexPermissionsInitParameters. +func (in *IndexPermissionsInitParameters) DeepCopy() *IndexPermissionsInitParameters { + if in == nil { + return nil + } + out := new(IndexPermissionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexPermissionsObservation) DeepCopyInto(out *IndexPermissionsObservation) { + *out = *in + if in.AllowedActions != nil { + in, out := &in.AllowedActions, &out.AllowedActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DocumentLevelSecurity != nil { + in, out := &in.DocumentLevelSecurity, &out.DocumentLevelSecurity + *out = new(string) + **out = **in + } + if in.FieldLevelSecurity != nil { + in, out := &in.FieldLevelSecurity, &out.FieldLevelSecurity + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IndexPatterns != nil { + in, out := &in.IndexPatterns, &out.IndexPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaskedFields != nil { + in, out := &in.MaskedFields, &out.MaskedFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexPermissionsObservation. +func (in *IndexPermissionsObservation) DeepCopy() *IndexPermissionsObservation { + if in == nil { + return nil + } + out := new(IndexPermissionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexPermissionsParameters) DeepCopyInto(out *IndexPermissionsParameters) { + *out = *in + if in.AllowedActions != nil { + in, out := &in.AllowedActions, &out.AllowedActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DocumentLevelSecurity != nil { + in, out := &in.DocumentLevelSecurity, &out.DocumentLevelSecurity + *out = new(string) + **out = **in + } + if in.FieldLevelSecurity != nil { + in, out := &in.FieldLevelSecurity, &out.FieldLevelSecurity + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IndexPatterns != nil { + in, out := &in.IndexPatterns, &out.IndexPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaskedFields != nil { + in, out := &in.MaskedFields, &out.MaskedFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexPermissionsParameters. +func (in *IndexPermissionsParameters) DeepCopy() *IndexPermissionsParameters { + if in == nil { + return nil + } + out := new(IndexPermissionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexSpec) DeepCopyInto(out *IndexSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexSpec. 
+func (in *IndexSpec) DeepCopy() *IndexSpec { + if in == nil { + return nil + } + out := new(IndexSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexStatus) DeepCopyInto(out *IndexStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexStatus. +func (in *IndexStatus) DeepCopy() *IndexStatus { + if in == nil { + return nil + } + out := new(IndexStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Monitor) DeepCopyInto(out *Monitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitor. +func (in *Monitor) DeepCopy() *Monitor { + if in == nil { + return nil + } + out := new(Monitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Monitor) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorInitParameters) DeepCopyInto(out *MonitorInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorInitParameters. 
+func (in *MonitorInitParameters) DeepCopy() *MonitorInitParameters { + if in == nil { + return nil + } + out := new(MonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorList) DeepCopyInto(out *MonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Monitor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorList. +func (in *MonitorList) DeepCopy() *MonitorList { + if in == nil { + return nil + } + out := new(MonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorObservation) DeepCopyInto(out *MonitorObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorObservation. +func (in *MonitorObservation) DeepCopy() *MonitorObservation { + if in == nil { + return nil + } + out := new(MonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorParameters) DeepCopyInto(out *MonitorParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorParameters. +func (in *MonitorParameters) DeepCopy() *MonitorParameters { + if in == nil { + return nil + } + out := new(MonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorSpec) DeepCopyInto(out *MonitorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSpec. +func (in *MonitorSpec) DeepCopy() *MonitorSpec { + if in == nil { + return nil + } + out := new(MonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorStatus) DeepCopyInto(out *MonitorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorStatus. +func (in *MonitorStatus) DeepCopy() *MonitorStatus { + if in == nil { + return nil + } + out := new(MonitorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleInitParameters) DeepCopyInto(out *RoleInitParameters) { + *out = *in + if in.ClusterPermissions != nil { + in, out := &in.ClusterPermissions, &out.ClusterPermissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexPermissions != nil { + in, out := &in.IndexPermissions, &out.IndexPermissions + *out = make([]IndexPermissionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.TenantPermissions != nil { + in, out := &in.TenantPermissions, &out.TenantPermissions + *out = make([]TenantPermissionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleInitParameters. 
+func (in *RoleInitParameters) DeepCopy() *RoleInitParameters { + if in == nil { + return nil + } + out := new(RoleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleObservation) DeepCopyInto(out *RoleObservation) { + *out = *in + if in.ClusterPermissions != nil { + in, out := &in.ClusterPermissions, &out.ClusterPermissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexPermissions != nil { + in, out := &in.IndexPermissions, &out.IndexPermissions + *out = make([]IndexPermissionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.TenantPermissions != nil { + in, out := &in.TenantPermissions, &out.TenantPermissions + *out = make([]TenantPermissionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleObservation. +func (in *RoleObservation) DeepCopy() *RoleObservation { + if in == nil { + return nil + } + out := new(RoleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleParameters) DeepCopyInto(out *RoleParameters) { + *out = *in + if in.ClusterPermissions != nil { + in, out := &in.ClusterPermissions, &out.ClusterPermissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexPermissions != nil { + in, out := &in.IndexPermissions, &out.IndexPermissions + *out = make([]IndexPermissionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.TenantPermissions != nil { + in, out := &in.TenantPermissions, &out.TenantPermissions + *out = make([]TenantPermissionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleParameters. +func (in *RoleParameters) DeepCopy() *RoleParameters { + if in == nil { + return nil + } + out := new(RoleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleSpec) DeepCopyInto(out *RoleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleSpec. +func (in *RoleSpec) DeepCopy() *RoleSpec { + if in == nil { + return nil + } + out := new(RoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleStatus) DeepCopyInto(out *RoleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleStatus. +func (in *RoleStatus) DeepCopy() *RoleStatus { + if in == nil { + return nil + } + out := new(RoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Script) DeepCopyInto(out *Script) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Script. +func (in *Script) DeepCopy() *Script { + if in == nil { + return nil + } + out := new(Script) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Script) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptInitParameters) DeepCopyInto(out *ScriptInitParameters) { + *out = *in + if in.Lang != nil { + in, out := &in.Lang, &out.Lang + *out = new(string) + **out = **in + } + if in.ScriptID != nil { + in, out := &in.ScriptID, &out.ScriptID + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptInitParameters. 
+func (in *ScriptInitParameters) DeepCopy() *ScriptInitParameters { + if in == nil { + return nil + } + out := new(ScriptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptList) DeepCopyInto(out *ScriptList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Script, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptList. +func (in *ScriptList) DeepCopy() *ScriptList { + if in == nil { + return nil + } + out := new(ScriptList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScriptList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptObservation) DeepCopyInto(out *ScriptObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Lang != nil { + in, out := &in.Lang, &out.Lang + *out = new(string) + **out = **in + } + if in.ScriptID != nil { + in, out := &in.ScriptID, &out.ScriptID + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptObservation. 
+func (in *ScriptObservation) DeepCopy() *ScriptObservation { + if in == nil { + return nil + } + out := new(ScriptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptParameters) DeepCopyInto(out *ScriptParameters) { + *out = *in + if in.Lang != nil { + in, out := &in.Lang, &out.Lang + *out = new(string) + **out = **in + } + if in.ScriptID != nil { + in, out := &in.ScriptID, &out.ScriptID + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptParameters. +func (in *ScriptParameters) DeepCopy() *ScriptParameters { + if in == nil { + return nil + } + out := new(ScriptParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptSpec) DeepCopyInto(out *ScriptSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptSpec. +func (in *ScriptSpec) DeepCopy() *ScriptSpec { + if in == nil { + return nil + } + out := new(ScriptSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptStatus) DeepCopyInto(out *ScriptStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptStatus. 
+func (in *ScriptStatus) DeepCopy() *ScriptStatus { + if in == nil { + return nil + } + out := new(ScriptStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantPermissionsInitParameters) DeepCopyInto(out *TenantPermissionsInitParameters) { + *out = *in + if in.AllowedActions != nil { + in, out := &in.AllowedActions, &out.AllowedActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TenantPatterns != nil { + in, out := &in.TenantPatterns, &out.TenantPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantPermissionsInitParameters. +func (in *TenantPermissionsInitParameters) DeepCopy() *TenantPermissionsInitParameters { + if in == nil { + return nil + } + out := new(TenantPermissionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TenantPermissionsObservation) DeepCopyInto(out *TenantPermissionsObservation) { + *out = *in + if in.AllowedActions != nil { + in, out := &in.AllowedActions, &out.AllowedActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TenantPatterns != nil { + in, out := &in.TenantPatterns, &out.TenantPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantPermissionsObservation. +func (in *TenantPermissionsObservation) DeepCopy() *TenantPermissionsObservation { + if in == nil { + return nil + } + out := new(TenantPermissionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantPermissionsParameters) DeepCopyInto(out *TenantPermissionsParameters) { + *out = *in + if in.AllowedActions != nil { + in, out := &in.AllowedActions, &out.AllowedActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TenantPatterns != nil { + in, out := &in.TenantPatterns, &out.TenantPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantPermissionsParameters. 
+func (in *TenantPermissionsParameters) DeepCopy() *TenantPermissionsParameters { + if in == nil { + return nil + } + out := new(TenantPermissionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.BackendRoles != nil { + in, out := &in.BackendRoles, &out.BackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInitParameters. +func (in *UserInitParameters) DeepCopy() *UserInitParameters { + if in == nil { + return nil + } + out := new(UserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.BackendRoles != nil { + in, out := &in.BackendRoles, &out.BackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. +func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.BackendRoles != nil { + in, out := &in.BackendRoles, &out.BackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.PasswordHashSecretRef != nil { + in, out := &in.PasswordHashSecretRef, &out.PasswordHashSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. 
+func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserStatus) DeepCopyInto(out *UserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. +func (in *UserStatus) DeepCopy() *UserStatus { + if in == nil { + return nil + } + out := new(UserStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/opensearch/v1alpha1/zz_generated.managed.go b/apis/opensearch/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..41d2f3f --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_generated.managed.go @@ -0,0 +1,305 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Index. +func (mg *Index) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Index. +func (mg *Index) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Index. +func (mg *Index) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Index. +func (mg *Index) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Index. +func (mg *Index) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Index. 
+func (mg *Index) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Index. +func (mg *Index) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Index. +func (mg *Index) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Index. +func (mg *Index) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Index. +func (mg *Index) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Index. +func (mg *Index) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Index. +func (mg *Index) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Monitor. +func (mg *Monitor) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Monitor. +func (mg *Monitor) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Monitor. +func (mg *Monitor) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Monitor. +func (mg *Monitor) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Monitor. +func (mg *Monitor) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Monitor. 
+func (mg *Monitor) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Monitor. +func (mg *Monitor) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Monitor. +func (mg *Monitor) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Monitor. +func (mg *Monitor) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Monitor. +func (mg *Monitor) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Monitor. +func (mg *Monitor) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Monitor. +func (mg *Monitor) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Role. +func (mg *Role) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Role. +func (mg *Role) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Role. +func (mg *Role) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Role. +func (mg *Role) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Role. +func (mg *Role) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Role. 
+func (mg *Role) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Role. +func (mg *Role) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Role. +func (mg *Role) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Role. +func (mg *Role) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Role. +func (mg *Role) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Role. +func (mg *Role) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Role. +func (mg *Role) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Script. +func (mg *Script) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Script. +func (mg *Script) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Script. +func (mg *Script) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Script. +func (mg *Script) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Script. +func (mg *Script) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Script. 
+func (mg *Script) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Script. +func (mg *Script) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Script. +func (mg *Script) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Script. +func (mg *Script) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Script. +func (mg *Script) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Script. +func (mg *Script) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Script. +func (mg *Script) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this User. +func (mg *User) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this User. +func (mg *User) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this User. +func (mg *User) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this User. +func (mg *User) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this User. +func (mg *User) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this User. 
+func (mg *User) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this User. +func (mg *User) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this User. +func (mg *User) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this User. +func (mg *User) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this User. +func (mg *User) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this User. +func (mg *User) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this User. +func (mg *User) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/opensearch/v1alpha1/zz_generated.managedlist.go b/apis/opensearch/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..16d3f11 --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,50 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this IndexList. +func (l *IndexList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorList. +func (l *MonitorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RoleList. 
+func (l *RoleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ScriptList. +func (l *ScriptList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this UserList. +func (l *UserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/opensearch/v1alpha1/zz_groupversion_info.go b/apis/opensearch/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..c03bac7 --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=opensearch.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "opensearch.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/opensearch/v1alpha1/zz_index_terraformed.go b/apis/opensearch/v1alpha1/zz_index_terraformed.go new file mode 100755 index 0000000..ac78ebe --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_index_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Index +func (mg *Index) GetTerraformResourceType() string { + return "opensearch_index" +} + +// GetConnectionDetailsMapping for this Index +func (tr *Index) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Index +func (tr *Index) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Index +func (tr *Index) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Index +func (tr *Index) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Index +func (tr *Index) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Index +func (tr *Index) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Index
+func (tr *Index) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Index. It merges spec.initProvider into
+// spec.forProvider (without overwriting forProvider values) when
+// shouldMergeInitProvider is true; otherwise it returns forProvider alone.
+func (tr *Index) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Index using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Index) LateInitialize(attrs []byte) (bool, error) { + params := &IndexParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Index) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opensearch/v1alpha1/zz_index_types.go b/apis/opensearch/v1alpha1/zz_index_types.go new file mode 100755 index 0000000..6772365 --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_index_types.go @@ -0,0 +1,891 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IndexInitParameters struct { + + // (String) A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + // A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. 
+ Aliases *string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // (String) A JSON string describing the analyzers applied to the index. + // A JSON string describing the analyzers applied to the index. + AnalysisAnalyzer *string `json:"analysisAnalyzer,omitempty" tf:"analysis_analyzer,omitempty"` + + // (String) A JSON string describing the char_filters applied to the index. + // A JSON string describing the char_filters applied to the index. + AnalysisCharFilter *string `json:"analysisCharFilter,omitempty" tf:"analysis_char_filter,omitempty"` + + // (String) A JSON string describing the filters applied to the index. + // A JSON string describing the filters applied to the index. + AnalysisFilter *string `json:"analysisFilter,omitempty" tf:"analysis_filter,omitempty"` + + // (String) A JSON string describing the normalizers applied to the index. + // A JSON string describing the normalizers applied to the index. + AnalysisNormalizer *string `json:"analysisNormalizer,omitempty" tf:"analysis_normalizer,omitempty"` + + // (String) A JSON string describing the tokenizers applied to the index. + // A JSON string describing the tokenizers applied to the index. + AnalysisTokenizer *string `json:"analysisTokenizer,omitempty" tf:"analysis_tokenizer,omitempty"` + + // (String) The maximum number of tokens that can be produced using _analyze API. A stringified number. + // The maximum number of tokens that can be produced using _analyze API. A stringified number. + AnalyzeMaxTokenCount *string `json:"analyzeMaxTokenCount,omitempty" tf:"analyze_max_token_count,omitempty"` + + // 5) or use all for the upper bound (e.g. 0-all) + // Set the number of replicas to the node count in the cluster. Set to a dash delimited lower and upper bound (e.g. 0-5) or use all for the upper bound (e.g. 0-all) + AutoExpandReplicas *string `json:"autoExpandReplicas,omitempty" tf:"auto_expand_replicas,omitempty"` + + // (Boolean) Set to true to disable index metadata reads and writes. 
+ // Set to `true` to disable index metadata reads and writes. + BlocksMetadata *bool `json:"blocksMetadata,omitempty" tf:"blocks_metadata,omitempty"` + + // (Boolean) Set to true to disable read operations against the index. + // Set to `true` to disable read operations against the index. + BlocksRead *bool `json:"blocksRead,omitempty" tf:"blocks_read,omitempty"` + + // (Boolean) Set to true to make the index and index metadata read only, false to allow writes and metadata changes. + // Set to `true` to make the index and index metadata read only, `false` to allow writes and metadata changes. + BlocksReadOnly *bool `json:"blocksReadOnly,omitempty" tf:"blocks_read_only,omitempty"` + + // (Boolean) Identical to index.blocks.read_only but allows deleting the index to free up resources. + // Identical to `index.blocks.read_only` but allows deleting the index to free up resources. + BlocksReadOnlyAllowDelete *bool `json:"blocksReadOnlyAllowDelete,omitempty" tf:"blocks_read_only_allow_delete,omitempty"` + + // (Boolean) Set to true to disable data write operations against the index. This setting does not affect metadata. + // Set to `true` to disable data write operations against the index. This setting does not affect metadata. + BlocksWrite *bool `json:"blocksWrite,omitempty" tf:"blocks_write,omitempty"` + + // (String) The default value compresses stored data with LZ4 compression, but this can be set to best_compression which uses DEFLATE for a higher compression ratio. This can be set only on creation. + // The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` which uses DEFLATE for a higher compression ratio. This can be set only on creation. + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // (String) The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + // The default ingest node pipeline for this index. 
Index requests will fail if the default pipeline is set and the pipeline does not exist. + DefaultPipeline *string `json:"defaultPipeline,omitempty" tf:"default_pipeline,omitempty"` + + // (Boolean) A boolean that indicates that the index should be deleted even if it contains documents. + // A boolean that indicates that the index should be deleted even if it contains documents. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // (String) The length of time that a deleted document's version number remains available for further versioned operations. + // The length of time that a deleted document's version number remains available for further versioned operations. + GcDeletes *string `json:"gcDeletes,omitempty" tf:"gc_deletes,omitempty"` + + // (String) The maximum number of characters that will be analyzed for a highlight request. A stringified number. + // The maximum number of characters that will be analyzed for a highlight request. A stringified number. + HighlightMaxAnalyzedOffset *string `json:"highlightMaxAnalyzedOffset,omitempty" tf:"highlight_max_analyzed_offset,omitempty"` + + // (String) A string that indicates if and what we should pass to include_type_name parameter. Set to "false" when trying to create an index on a v6 cluster without a doc type or set to "true" when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + // A string that indicates if and what we should pass to include_type_name parameter. Set to `"false"` when trying to create an index on a v6 cluster without a doc type or set to `"true"` when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + IncludeTypeName *string `json:"includeTypeName,omitempty" tf:"include_type_name,omitempty"` + + // NN search functionality will be disabled. 
+ // Indicates whether the index should build native library indices for the knn_vector fields. If set to false, the knn_vector fields will be stored in doc values, but Approximate k-NN search functionality will be disabled. + IndexKnn *bool `json:"indexKnn,omitempty" tf:"index_knn,omitempty"` + + // NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib implementation. + // The size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib. + IndexKnnAlgoParamEfSearch *string `json:"indexKnnAlgoParamEfSearch,omitempty" tf:"index_knn_algo_param_ef_search,omitempty"` + + // (String) A JSON string describing the default index similarity config. + // A JSON string describing the default index similarity config. + IndexSimilarityDefault *string `json:"indexSimilarityDefault,omitempty" tf:"index_similarity_default,omitempty"` + + // (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + // Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + IndexingSlowlogLevel *string `json:"indexingSlowlogLevel,omitempty" tf:"indexing_slowlog_level,omitempty"` + + // (String) Set the number of characters of the _source to include in the slowlog lines, false or 0 will skip logging the source entirely and setting it to true will log the entire source regardless of size. The original _source is reformatted by default to make sure that it fits on a single log line. + // Set the number of characters of the `_source` to include in the slowlog lines, `false` or `0` will skip logging the source entirely and setting it to `true` will log the entire source regardless of size. The original `_source` is reformatted by default to make sure that it fits on a single log line. 
+ IndexingSlowlogSource *string `json:"indexingSlowlogSource,omitempty" tf:"indexing_slowlog_source,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `2s` + IndexingSlowlogThresholdIndexDebug *string `json:"indexingSlowlogThresholdIndexDebug,omitempty" tf:"indexing_slowlog_threshold_index_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `5s` + IndexingSlowlogThresholdIndexInfo *string `json:"indexingSlowlogThresholdIndexInfo,omitempty" tf:"indexing_slowlog_threshold_index_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 500ms + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `500ms` + IndexingSlowlogThresholdIndexTrace *string `json:"indexingSlowlogThresholdIndexTrace,omitempty" tf:"indexing_slowlog_threshold_index_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 10s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `10s` + IndexingSlowlogThresholdIndexWarn *string `json:"indexingSlowlogThresholdIndexWarn,omitempty" tf:"indexing_slowlog_threshold_index_warn,omitempty"` + + // loaded for nested queries. This can be set only on creation. + // Indicates whether cached filters are pre-loaded for nested queries. This can be set only on creation. 
+ LoadFixedBitsetFiltersEagerly *bool `json:"loadFixedBitsetFiltersEagerly,omitempty" tf:"load_fixed_bitset_filters_eagerly,omitempty"` + + // (String) A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + // A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + Mappings *string `json:"mappings,omitempty" tf:"mappings,omitempty"` + + // (String) The maximum number of docvalue_fields that are allowed in a query. A stringified number. + // The maximum number of `docvalue_fields` that are allowed in a query. A stringified number. + MaxDocvalueFieldsSearch *string `json:"maxDocvalueFieldsSearch,omitempty" tf:"max_docvalue_fields_search,omitempty"` + + // (String) The maximum value of from + size for inner hits definition and top hits aggregations to this index. A stringified number. + // The maximum value of `from + size` for inner hits definition and top hits aggregations to this index. A stringified number. + MaxInnerResultWindow *string `json:"maxInnerResultWindow,omitempty" tf:"max_inner_result_window,omitempty"` + + // (String) The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + // The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + MaxNgramDiff *string `json:"maxNgramDiff,omitempty" tf:"max_ngram_diff,omitempty"` + + // (String) Maximum number of refresh listeners available on each shard of the index. A stringified number. + // Maximum number of refresh listeners available on each shard of the index. A stringified number. 
+ MaxRefreshListeners *string `json:"maxRefreshListeners,omitempty" tf:"max_refresh_listeners,omitempty"` + + // (String) The maximum length of regex that can be used in Regexp Query. A stringified number. + // The maximum length of regex that can be used in Regexp Query. A stringified number. + MaxRegexLength *string `json:"maxRegexLength,omitempty" tf:"max_regex_length,omitempty"` + + // (String) The maximum value of window_size for rescore requests in searches of this index. A stringified number. + // The maximum value of `window_size` for `rescore` requests in searches of this index. A stringified number. + MaxRescoreWindow *string `json:"maxRescoreWindow,omitempty" tf:"max_rescore_window,omitempty"` + + // (String) The maximum value of from + size for searches to this index. A stringified number. + // The maximum value of `from + size` for searches to this index. A stringified number. + MaxResultWindow *string `json:"maxResultWindow,omitempty" tf:"max_result_window,omitempty"` + + // (String) The maximum number of script_fields that are allowed in a query. A stringified number. + // The maximum number of `script_fields` that are allowed in a query. A stringified number. + MaxScriptFields *string `json:"maxScriptFields,omitempty" tf:"max_script_fields,omitempty"` + + // (String) The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + // The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + MaxShingleDiff *string `json:"maxShingleDiff,omitempty" tf:"max_shingle_diff,omitempty"` + + // (String) The maximum number of terms that can be used in Terms Query. A stringified number. + // The maximum number of terms that can be used in Terms Query. A stringified number. 
+ MaxTermsCount *string `json:"maxTermsCount,omitempty" tf:"max_terms_count,omitempty"` + + // (String) Name of the index to create + // Name of the index to create + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (String) Number of shard replicas. A stringified number. + // Number of shard replicas. A stringified number. + NumberOfReplicas *string `json:"numberOfReplicas,omitempty" tf:"number_of_replicas,omitempty"` + + // (String) Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + // Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + NumberOfRoutingShards *string `json:"numberOfRoutingShards,omitempty" tf:"number_of_routing_shards,omitempty"` + + // (String) Number of shards for the index. This can be set only on creation. + // Number of shards for the index. This can be set only on creation. + NumberOfShards *string `json:"numberOfShards,omitempty" tf:"number_of_shards,omitempty"` + + // 1 to disable refresh. + // How often to perform a refresh operation, which makes recent changes to the index visible to search. Can be set to `-1` to disable refresh. + RefreshInterval *string `json:"refreshInterval,omitempty" tf:"refresh_interval,omitempty"` + + // (String) + RolloverAlias *string `json:"rolloverAlias,omitempty" tf:"rollover_alias,omitempty"` + + // (String) Controls shard allocation for this index. It can be set to: all , primaries , new_primaries , none. + // Controls shard allocation for this index. It can be set to: `all` , `primaries` , `new_primaries` , `none`. + RoutingAllocationEnable *string `json:"routingAllocationEnable,omitempty" tf:"routing_allocation_enable,omitempty"` + + // (String) The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + // The number of shards a custom routing value can go to. A stringified number. 
This can be set only on creation. + RoutingPartitionSize *string `json:"routingPartitionSize,omitempty" tf:"routing_partition_size,omitempty"` + + // (String) Enables shard rebalancing for this index. It can be set to: all, primaries , replicas , none. + // Enables shard rebalancing for this index. It can be set to: `all`, `primaries` , `replicas` , `none`. + RoutingRebalanceEnable *string `json:"routingRebalanceEnable,omitempty" tf:"routing_rebalance_enable,omitempty"` + + // (String) How long a shard can not receive a search or get request until it’s considered search idle. + // How long a shard can not receive a search or get request until it’s considered search idle. + SearchIdleAfter *string `json:"searchIdleAfter,omitempty" tf:"search_idle_after,omitempty"` + + // (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + // Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + SearchSlowlogLevel *string `json:"searchSlowlogLevel,omitempty" tf:"search_slowlog_level,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `2s` + SearchSlowlogThresholdFetchDebug *string `json:"searchSlowlogThresholdFetchDebug,omitempty" tf:"search_slowlog_threshold_fetch_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `5s` + SearchSlowlogThresholdFetchInfo *string `json:"searchSlowlogThresholdFetchInfo,omitempty" tf:"search_slowlog_threshold_fetch_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 
500ms + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `500ms` + SearchSlowlogThresholdFetchTrace *string `json:"searchSlowlogThresholdFetchTrace,omitempty" tf:"search_slowlog_threshold_fetch_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 10s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `10s` + SearchSlowlogThresholdFetchWarn *string `json:"searchSlowlogThresholdFetchWarn,omitempty" tf:"search_slowlog_threshold_fetch_warn,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `2s` + SearchSlowlogThresholdQueryDebug *string `json:"searchSlowlogThresholdQueryDebug,omitempty" tf:"search_slowlog_threshold_query_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `5s` + SearchSlowlogThresholdQueryInfo *string `json:"searchSlowlogThresholdQueryInfo,omitempty" tf:"search_slowlog_threshold_query_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 500ms + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `500ms` + SearchSlowlogThresholdQueryTrace *string `json:"searchSlowlogThresholdQueryTrace,omitempty" tf:"search_slowlog_threshold_query_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 
10s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `10s` + SearchSlowlogThresholdQueryWarn *string `json:"searchSlowlogThresholdQueryWarn,omitempty" tf:"search_slowlog_threshold_query_warn,omitempty"` + + // (String) Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts false, true, checksum. + // Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts `false`, `true`, `checksum`. + ShardCheckOnStartup *string `json:"shardCheckOnStartup,omitempty" tf:"shard_check_on_startup,omitempty"` + + // (String) The field to sort shards in this index by. + // The field to sort shards in this index by. + SortField *string `json:"sortField,omitempty" tf:"sort_field,omitempty"` + + // (String) The direction to sort shards in. Accepts asc, desc. + // The direction to sort shards in. Accepts `asc`, `desc`. + SortOrder *string `json:"sortOrder,omitempty" tf:"sort_order,omitempty"` +} + +type IndexObservation struct { + + // (String) A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + // A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + Aliases *string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // (String) A JSON string describing the analyzers applied to the index. 
+ // A JSON string describing the analyzers applied to the index. + AnalysisAnalyzer *string `json:"analysisAnalyzer,omitempty" tf:"analysis_analyzer,omitempty"` + + // (String) A JSON string describing the char_filters applied to the index. + // A JSON string describing the char_filters applied to the index. + AnalysisCharFilter *string `json:"analysisCharFilter,omitempty" tf:"analysis_char_filter,omitempty"` + + // (String) A JSON string describing the filters applied to the index. + // A JSON string describing the filters applied to the index. + AnalysisFilter *string `json:"analysisFilter,omitempty" tf:"analysis_filter,omitempty"` + + // (String) A JSON string describing the normalizers applied to the index. + // A JSON string describing the normalizers applied to the index. + AnalysisNormalizer *string `json:"analysisNormalizer,omitempty" tf:"analysis_normalizer,omitempty"` + + // (String) A JSON string describing the tokenizers applied to the index. + // A JSON string describing the tokenizers applied to the index. + AnalysisTokenizer *string `json:"analysisTokenizer,omitempty" tf:"analysis_tokenizer,omitempty"` + + // (String) The maximum number of tokens that can be produced using _analyze API. A stringified number. + // The maximum number of tokens that can be produced using _analyze API. A stringified number. + AnalyzeMaxTokenCount *string `json:"analyzeMaxTokenCount,omitempty" tf:"analyze_max_token_count,omitempty"` + + // 5) or use all for the upper bound (e.g. 0-all) + // Set the number of replicas to the node count in the cluster. Set to a dash delimited lower and upper bound (e.g. 0-5) or use all for the upper bound (e.g. 0-all) + AutoExpandReplicas *string `json:"autoExpandReplicas,omitempty" tf:"auto_expand_replicas,omitempty"` + + // (Boolean) Set to true to disable index metadata reads and writes. + // Set to `true` to disable index metadata reads and writes. 
+ BlocksMetadata *bool `json:"blocksMetadata,omitempty" tf:"blocks_metadata,omitempty"` + + // (Boolean) Set to true to disable read operations against the index. + // Set to `true` to disable read operations against the index. + BlocksRead *bool `json:"blocksRead,omitempty" tf:"blocks_read,omitempty"` + + // (Boolean) Set to true to make the index and index metadata read only, false to allow writes and metadata changes. + // Set to `true` to make the index and index metadata read only, `false` to allow writes and metadata changes. + BlocksReadOnly *bool `json:"blocksReadOnly,omitempty" tf:"blocks_read_only,omitempty"` + + // (Boolean) Identical to index.blocks.read_only but allows deleting the index to free up resources. + // Identical to `index.blocks.read_only` but allows deleting the index to free up resources. + BlocksReadOnlyAllowDelete *bool `json:"blocksReadOnlyAllowDelete,omitempty" tf:"blocks_read_only_allow_delete,omitempty"` + + // (Boolean) Set to true to disable data write operations against the index. This setting does not affect metadata. + // Set to `true` to disable data write operations against the index. This setting does not affect metadata. + BlocksWrite *bool `json:"blocksWrite,omitempty" tf:"blocks_write,omitempty"` + + // (String) The default value compresses stored data with LZ4 compression, but this can be set to best_compression which uses DEFLATE for a higher compression ratio. This can be set only on creation. + // The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` which uses DEFLATE for a higher compression ratio. This can be set only on creation. + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // (String) The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + // The default ingest node pipeline for this index. 
Index requests will fail if the default pipeline is set and the pipeline does not exist. + DefaultPipeline *string `json:"defaultPipeline,omitempty" tf:"default_pipeline,omitempty"` + + // (Boolean) A boolean that indicates that the index should be deleted even if it contains documents. + // A boolean that indicates that the index should be deleted even if it contains documents. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // (String) The length of time that a deleted document's version number remains available for further versioned operations. + // The length of time that a deleted document's version number remains available for further versioned operations. + GcDeletes *string `json:"gcDeletes,omitempty" tf:"gc_deletes,omitempty"` + + // (String) The maximum number of characters that will be analyzed for a highlight request. A stringified number. + // The maximum number of characters that will be analyzed for a highlight request. A stringified number. + HighlightMaxAnalyzedOffset *string `json:"highlightMaxAnalyzedOffset,omitempty" tf:"highlight_max_analyzed_offset,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) A string that indicates if and what we should pass to include_type_name parameter. Set to "false" when trying to create an index on a v6 cluster without a doc type or set to "true" when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + // A string that indicates if and what we should pass to include_type_name parameter. Set to `"false"` when trying to create an index on a v6 cluster without a doc type or set to `"true"` when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. 
+ IncludeTypeName *string `json:"includeTypeName,omitempty" tf:"include_type_name,omitempty"` + + // NN search functionality will be disabled. + // Indicates whether the index should build native library indices for the knn_vector fields. If set to false, the knn_vector fields will be stored in doc values, but Approximate k-NN search functionality will be disabled. + IndexKnn *bool `json:"indexKnn,omitempty" tf:"index_knn,omitempty"` + + // NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib implementation. + // The size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib. + IndexKnnAlgoParamEfSearch *string `json:"indexKnnAlgoParamEfSearch,omitempty" tf:"index_knn_algo_param_ef_search,omitempty"` + + // (String) A JSON string describing the default index similarity config. + // A JSON string describing the default index similarity config. + IndexSimilarityDefault *string `json:"indexSimilarityDefault,omitempty" tf:"index_similarity_default,omitempty"` + + // (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + // Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + IndexingSlowlogLevel *string `json:"indexingSlowlogLevel,omitempty" tf:"indexing_slowlog_level,omitempty"` + + // (String) Set the number of characters of the _source to include in the slowlog lines, false or 0 will skip logging the source entirely and setting it to true will log the entire source regardless of size. The original _source is reformatted by default to make sure that it fits on a single log line. + // Set the number of characters of the `_source` to include in the slowlog lines, `false` or `0` will skip logging the source entirely and setting it to `true` will log the entire source regardless of size. 
The original `_source` is reformatted by default to make sure that it fits on a single log line. + IndexingSlowlogSource *string `json:"indexingSlowlogSource,omitempty" tf:"indexing_slowlog_source,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `2s` + IndexingSlowlogThresholdIndexDebug *string `json:"indexingSlowlogThresholdIndexDebug,omitempty" tf:"indexing_slowlog_threshold_index_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `5s` + IndexingSlowlogThresholdIndexInfo *string `json:"indexingSlowlogThresholdIndexInfo,omitempty" tf:"indexing_slowlog_threshold_index_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 500ms + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `500ms` + IndexingSlowlogThresholdIndexTrace *string `json:"indexingSlowlogThresholdIndexTrace,omitempty" tf:"indexing_slowlog_threshold_index_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 10s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `10s` + IndexingSlowlogThresholdIndexWarn *string `json:"indexingSlowlogThresholdIndexWarn,omitempty" tf:"indexing_slowlog_threshold_index_warn,omitempty"` + + // loaded for nested queries. This can be set only on creation. + // Indicates whether cached filters are pre-loaded for nested queries. This can be set only on creation. 
+ LoadFixedBitsetFiltersEagerly *bool `json:"loadFixedBitsetFiltersEagerly,omitempty" tf:"load_fixed_bitset_filters_eagerly,omitempty"` + + // (String) A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + // A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + Mappings *string `json:"mappings,omitempty" tf:"mappings,omitempty"` + + // (String) The maximum number of docvalue_fields that are allowed in a query. A stringified number. + // The maximum number of `docvalue_fields` that are allowed in a query. A stringified number. + MaxDocvalueFieldsSearch *string `json:"maxDocvalueFieldsSearch,omitempty" tf:"max_docvalue_fields_search,omitempty"` + + // (String) The maximum value of from + size for inner hits definition and top hits aggregations to this index. A stringified number. + // The maximum value of `from + size` for inner hits definition and top hits aggregations to this index. A stringified number. + MaxInnerResultWindow *string `json:"maxInnerResultWindow,omitempty" tf:"max_inner_result_window,omitempty"` + + // (String) The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + // The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + MaxNgramDiff *string `json:"maxNgramDiff,omitempty" tf:"max_ngram_diff,omitempty"` + + // (String) Maximum number of refresh listeners available on each shard of the index. A stringified number. + // Maximum number of refresh listeners available on each shard of the index. A stringified number. 
+ MaxRefreshListeners *string `json:"maxRefreshListeners,omitempty" tf:"max_refresh_listeners,omitempty"` + + // (String) The maximum length of regex that can be used in Regexp Query. A stringified number. + // The maximum length of regex that can be used in Regexp Query. A stringified number. + MaxRegexLength *string `json:"maxRegexLength,omitempty" tf:"max_regex_length,omitempty"` + + // (String) The maximum value of window_size for rescore requests in searches of this index. A stringified number. + // The maximum value of `window_size` for `rescore` requests in searches of this index. A stringified number. + MaxRescoreWindow *string `json:"maxRescoreWindow,omitempty" tf:"max_rescore_window,omitempty"` + + // (String) The maximum value of from + size for searches to this index. A stringified number. + // The maximum value of `from + size` for searches to this index. A stringified number. + MaxResultWindow *string `json:"maxResultWindow,omitempty" tf:"max_result_window,omitempty"` + + // (String) The maximum number of script_fields that are allowed in a query. A stringified number. + // The maximum number of `script_fields` that are allowed in a query. A stringified number. + MaxScriptFields *string `json:"maxScriptFields,omitempty" tf:"max_script_fields,omitempty"` + + // (String) The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + // The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + MaxShingleDiff *string `json:"maxShingleDiff,omitempty" tf:"max_shingle_diff,omitempty"` + + // (String) The maximum number of terms that can be used in Terms Query. A stringified number. + // The maximum number of terms that can be used in Terms Query. A stringified number. 
+ MaxTermsCount *string `json:"maxTermsCount,omitempty" tf:"max_terms_count,omitempty"` + + // (String) Name of the index to create + // Name of the index to create + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (String) Number of shard replicas. A stringified number. + // Number of shard replicas. A stringified number. + NumberOfReplicas *string `json:"numberOfReplicas,omitempty" tf:"number_of_replicas,omitempty"` + + // (String) Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + // Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + NumberOfRoutingShards *string `json:"numberOfRoutingShards,omitempty" tf:"number_of_routing_shards,omitempty"` + + // (String) Number of shards for the index. This can be set only on creation. + // Number of shards for the index. This can be set only on creation. + NumberOfShards *string `json:"numberOfShards,omitempty" tf:"number_of_shards,omitempty"` + + // 1 to disable refresh. + // How often to perform a refresh operation, which makes recent changes to the index visible to search. Can be set to `-1` to disable refresh. + RefreshInterval *string `json:"refreshInterval,omitempty" tf:"refresh_interval,omitempty"` + + // (String) + RolloverAlias *string `json:"rolloverAlias,omitempty" tf:"rollover_alias,omitempty"` + + // (String) Controls shard allocation for this index. It can be set to: all , primaries , new_primaries , none. + // Controls shard allocation for this index. It can be set to: `all` , `primaries` , `new_primaries` , `none`. + RoutingAllocationEnable *string `json:"routingAllocationEnable,omitempty" tf:"routing_allocation_enable,omitempty"` + + // (String) The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + // The number of shards a custom routing value can go to. A stringified number. 
This can be set only on creation. + RoutingPartitionSize *string `json:"routingPartitionSize,omitempty" tf:"routing_partition_size,omitempty"` + + // (String) Enables shard rebalancing for this index. It can be set to: all, primaries , replicas , none. + // Enables shard rebalancing for this index. It can be set to: `all`, `primaries` , `replicas` , `none`. + RoutingRebalanceEnable *string `json:"routingRebalanceEnable,omitempty" tf:"routing_rebalance_enable,omitempty"` + + // (String) How long a shard can not receive a search or get request until it’s considered search idle. + // How long a shard can not receive a search or get request until it’s considered search idle. + SearchIdleAfter *string `json:"searchIdleAfter,omitempty" tf:"search_idle_after,omitempty"` + + // (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + // Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + SearchSlowlogLevel *string `json:"searchSlowlogLevel,omitempty" tf:"search_slowlog_level,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `2s` + SearchSlowlogThresholdFetchDebug *string `json:"searchSlowlogThresholdFetchDebug,omitempty" tf:"search_slowlog_threshold_fetch_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `5s` + SearchSlowlogThresholdFetchInfo *string `json:"searchSlowlogThresholdFetchInfo,omitempty" tf:"search_slowlog_threshold_fetch_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 
500ms + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `500ms` + SearchSlowlogThresholdFetchTrace *string `json:"searchSlowlogThresholdFetchTrace,omitempty" tf:"search_slowlog_threshold_fetch_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 10s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `10s` + SearchSlowlogThresholdFetchWarn *string `json:"searchSlowlogThresholdFetchWarn,omitempty" tf:"search_slowlog_threshold_fetch_warn,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `2s` + SearchSlowlogThresholdQueryDebug *string `json:"searchSlowlogThresholdQueryDebug,omitempty" tf:"search_slowlog_threshold_query_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `5s` + SearchSlowlogThresholdQueryInfo *string `json:"searchSlowlogThresholdQueryInfo,omitempty" tf:"search_slowlog_threshold_query_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 500ms + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `500ms` + SearchSlowlogThresholdQueryTrace *string `json:"searchSlowlogThresholdQueryTrace,omitempty" tf:"search_slowlog_threshold_query_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 
10s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `10s` + SearchSlowlogThresholdQueryWarn *string `json:"searchSlowlogThresholdQueryWarn,omitempty" tf:"search_slowlog_threshold_query_warn,omitempty"` + + // (String) Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts false, true, checksum. + // Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts `false`, `true`, `checksum`. + ShardCheckOnStartup *string `json:"shardCheckOnStartup,omitempty" tf:"shard_check_on_startup,omitempty"` + + // (String) The field to sort shards in this index by. + // The field to sort shards in this index by. + SortField *string `json:"sortField,omitempty" tf:"sort_field,omitempty"` + + // (String) The direction to sort shards in. Accepts asc, desc. + // The direction to sort shards in. Accepts `asc`, `desc`. + SortOrder *string `json:"sortOrder,omitempty" tf:"sort_order,omitempty"` +} + +type IndexParameters struct { + + // (String) A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + // A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. 
+ // +kubebuilder:validation:Optional + Aliases *string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // (String) A JSON string describing the analyzers applied to the index. + // A JSON string describing the analyzers applied to the index. + // +kubebuilder:validation:Optional + AnalysisAnalyzer *string `json:"analysisAnalyzer,omitempty" tf:"analysis_analyzer,omitempty"` + + // (String) A JSON string describing the char_filters applied to the index. + // A JSON string describing the char_filters applied to the index. + // +kubebuilder:validation:Optional + AnalysisCharFilter *string `json:"analysisCharFilter,omitempty" tf:"analysis_char_filter,omitempty"` + + // (String) A JSON string describing the filters applied to the index. + // A JSON string describing the filters applied to the index. + // +kubebuilder:validation:Optional + AnalysisFilter *string `json:"analysisFilter,omitempty" tf:"analysis_filter,omitempty"` + + // (String) A JSON string describing the normalizers applied to the index. + // A JSON string describing the normalizers applied to the index. + // +kubebuilder:validation:Optional + AnalysisNormalizer *string `json:"analysisNormalizer,omitempty" tf:"analysis_normalizer,omitempty"` + + // (String) A JSON string describing the tokenizers applied to the index. + // A JSON string describing the tokenizers applied to the index. + // +kubebuilder:validation:Optional + AnalysisTokenizer *string `json:"analysisTokenizer,omitempty" tf:"analysis_tokenizer,omitempty"` + + // (String) The maximum number of tokens that can be produced using _analyze API. A stringified number. + // The maximum number of tokens that can be produced using _analyze API. A stringified number. + // +kubebuilder:validation:Optional + AnalyzeMaxTokenCount *string `json:"analyzeMaxTokenCount,omitempty" tf:"analyze_max_token_count,omitempty"` + + // 5) or use all for the upper bound (e.g. 0-all) + // Set the number of replicas to the node count in the cluster. 
Set to a dash delimited lower and upper bound (e.g. 0-5) or use all for the upper bound (e.g. 0-all) + // +kubebuilder:validation:Optional + AutoExpandReplicas *string `json:"autoExpandReplicas,omitempty" tf:"auto_expand_replicas,omitempty"` + + // (Boolean) Set to true to disable index metadata reads and writes. + // Set to `true` to disable index metadata reads and writes. + // +kubebuilder:validation:Optional + BlocksMetadata *bool `json:"blocksMetadata,omitempty" tf:"blocks_metadata,omitempty"` + + // (Boolean) Set to true to disable read operations against the index. + // Set to `true` to disable read operations against the index. + // +kubebuilder:validation:Optional + BlocksRead *bool `json:"blocksRead,omitempty" tf:"blocks_read,omitempty"` + + // (Boolean) Set to true to make the index and index metadata read only, false to allow writes and metadata changes. + // Set to `true` to make the index and index metadata read only, `false` to allow writes and metadata changes. + // +kubebuilder:validation:Optional + BlocksReadOnly *bool `json:"blocksReadOnly,omitempty" tf:"blocks_read_only,omitempty"` + + // (Boolean) Identical to index.blocks.read_only but allows deleting the index to free up resources. + // Identical to `index.blocks.read_only` but allows deleting the index to free up resources. + // +kubebuilder:validation:Optional + BlocksReadOnlyAllowDelete *bool `json:"blocksReadOnlyAllowDelete,omitempty" tf:"blocks_read_only_allow_delete,omitempty"` + + // (Boolean) Set to true to disable data write operations against the index. This setting does not affect metadata. + // Set to `true` to disable data write operations against the index. This setting does not affect metadata. + // +kubebuilder:validation:Optional + BlocksWrite *bool `json:"blocksWrite,omitempty" tf:"blocks_write,omitempty"` + + // (String) The default value compresses stored data with LZ4 compression, but this can be set to best_compression which uses DEFLATE for a higher compression ratio. 
This can be set only on creation. + // The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` which uses DEFLATE for a higher compression ratio. This can be set only on creation. + // +kubebuilder:validation:Optional + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // (String) The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + // The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + // +kubebuilder:validation:Optional + DefaultPipeline *string `json:"defaultPipeline,omitempty" tf:"default_pipeline,omitempty"` + + // (Boolean) A boolean that indicates that the index should be deleted even if it contains documents. + // A boolean that indicates that the index should be deleted even if it contains documents. + // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // (String) The length of time that a deleted document's version number remains available for further versioned operations. + // The length of time that a deleted document's version number remains available for further versioned operations. + // +kubebuilder:validation:Optional + GcDeletes *string `json:"gcDeletes,omitempty" tf:"gc_deletes,omitempty"` + + // (String) The maximum number of characters that will be analyzed for a highlight request. A stringified number. + // The maximum number of characters that will be analyzed for a highlight request. A stringified number. + // +kubebuilder:validation:Optional + HighlightMaxAnalyzedOffset *string `json:"highlightMaxAnalyzedOffset,omitempty" tf:"highlight_max_analyzed_offset,omitempty"` + + // (String) A string that indicates if and what we should pass to include_type_name parameter. 
Set to "false" when trying to create an index on a v6 cluster without a doc type or set to "true" when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + // A string that indicates if and what we should pass to include_type_name parameter. Set to `"false"` when trying to create an index on a v6 cluster without a doc type or set to `"true"` when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + // +kubebuilder:validation:Optional + IncludeTypeName *string `json:"includeTypeName,omitempty" tf:"include_type_name,omitempty"` + + // NN search functionality will be disabled. + // Indicates whether the index should build native library indices for the knn_vector fields. If set to false, the knn_vector fields will be stored in doc values, but Approximate k-NN search functionality will be disabled. + // +kubebuilder:validation:Optional + IndexKnn *bool `json:"indexKnn,omitempty" tf:"index_knn,omitempty"` + + // NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib implementation. + // The size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib. + // +kubebuilder:validation:Optional + IndexKnnAlgoParamEfSearch *string `json:"indexKnnAlgoParamEfSearch,omitempty" tf:"index_knn_algo_param_ef_search,omitempty"` + + // (String) A JSON string describing the default index similarity config. + // A JSON string describing the default index similarity config. 
+ // +kubebuilder:validation:Optional + IndexSimilarityDefault *string `json:"indexSimilarityDefault,omitempty" tf:"index_similarity_default,omitempty"` + + // (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + // Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + // +kubebuilder:validation:Optional + IndexingSlowlogLevel *string `json:"indexingSlowlogLevel,omitempty" tf:"indexing_slowlog_level,omitempty"` + + // (String) Set the number of characters of the _source to include in the slowlog lines, false or 0 will skip logging the source entirely and setting it to true will log the entire source regardless of size. The original _source is reformatted by default to make sure that it fits on a single log line. + // Set the number of characters of the `_source` to include in the slowlog lines, `false` or `0` will skip logging the source entirely and setting it to `true` will log the entire source regardless of size. The original `_source` is reformatted by default to make sure that it fits on a single log line. + // +kubebuilder:validation:Optional + IndexingSlowlogSource *string `json:"indexingSlowlogSource,omitempty" tf:"indexing_slowlog_source,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `2s` + // +kubebuilder:validation:Optional + IndexingSlowlogThresholdIndexDebug *string `json:"indexingSlowlogThresholdIndexDebug,omitempty" tf:"indexing_slowlog_threshold_index_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 
`5s` + // +kubebuilder:validation:Optional + IndexingSlowlogThresholdIndexInfo *string `json:"indexingSlowlogThresholdIndexInfo,omitempty" tf:"indexing_slowlog_threshold_index_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 500ms + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `500ms` + // +kubebuilder:validation:Optional + IndexingSlowlogThresholdIndexTrace *string `json:"indexingSlowlogThresholdIndexTrace,omitempty" tf:"indexing_slowlog_threshold_index_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 10s + // Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `10s` + // +kubebuilder:validation:Optional + IndexingSlowlogThresholdIndexWarn *string `json:"indexingSlowlogThresholdIndexWarn,omitempty" tf:"indexing_slowlog_threshold_index_warn,omitempty"` + + // loaded for nested queries. This can be set only on creation. + // Indicates whether cached filters are pre-loaded for nested queries. This can be set only on creation. + // +kubebuilder:validation:Optional + LoadFixedBitsetFiltersEagerly *bool `json:"loadFixedBitsetFiltersEagerly,omitempty" tf:"load_fixed_bitset_filters_eagerly,omitempty"` + + // (String) A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + // A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. 
+ // +kubebuilder:validation:Optional + Mappings *string `json:"mappings,omitempty" tf:"mappings,omitempty"` + + // (String) The maximum number of docvalue_fields that are allowed in a query. A stringified number. + // The maximum number of `docvalue_fields` that are allowed in a query. A stringified number. + // +kubebuilder:validation:Optional + MaxDocvalueFieldsSearch *string `json:"maxDocvalueFieldsSearch,omitempty" tf:"max_docvalue_fields_search,omitempty"` + + // (String) The maximum value of from + size for inner hits definition and top hits aggregations to this index. A stringified number. + // The maximum value of `from + size` for inner hits definition and top hits aggregations to this index. A stringified number. + // +kubebuilder:validation:Optional + MaxInnerResultWindow *string `json:"maxInnerResultWindow,omitempty" tf:"max_inner_result_window,omitempty"` + + // (String) The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + // The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + // +kubebuilder:validation:Optional + MaxNgramDiff *string `json:"maxNgramDiff,omitempty" tf:"max_ngram_diff,omitempty"` + + // (String) Maximum number of refresh listeners available on each shard of the index. A stringified number. + // Maximum number of refresh listeners available on each shard of the index. A stringified number. + // +kubebuilder:validation:Optional + MaxRefreshListeners *string `json:"maxRefreshListeners,omitempty" tf:"max_refresh_listeners,omitempty"` + + // (String) The maximum length of regex that can be used in Regexp Query. A stringified number. + // The maximum length of regex that can be used in Regexp Query. A stringified number. 
+ // +kubebuilder:validation:Optional + MaxRegexLength *string `json:"maxRegexLength,omitempty" tf:"max_regex_length,omitempty"` + + // (String) The maximum value of window_size for rescore requests in searches of this index. A stringified number. + // The maximum value of `window_size` for `rescore` requests in searches of this index. A stringified number. + // +kubebuilder:validation:Optional + MaxRescoreWindow *string `json:"maxRescoreWindow,omitempty" tf:"max_rescore_window,omitempty"` + + // (String) The maximum value of from + size for searches to this index. A stringified number. + // The maximum value of `from + size` for searches to this index. A stringified number. + // +kubebuilder:validation:Optional + MaxResultWindow *string `json:"maxResultWindow,omitempty" tf:"max_result_window,omitempty"` + + // (String) The maximum number of script_fields that are allowed in a query. A stringified number. + // The maximum number of `script_fields` that are allowed in a query. A stringified number. + // +kubebuilder:validation:Optional + MaxScriptFields *string `json:"maxScriptFields,omitempty" tf:"max_script_fields,omitempty"` + + // (String) The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + // The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + // +kubebuilder:validation:Optional + MaxShingleDiff *string `json:"maxShingleDiff,omitempty" tf:"max_shingle_diff,omitempty"` + + // (String) The maximum number of terms that can be used in Terms Query. A stringified number. + // The maximum number of terms that can be used in Terms Query. A stringified number. 
+ // +kubebuilder:validation:Optional + MaxTermsCount *string `json:"maxTermsCount,omitempty" tf:"max_terms_count,omitempty"` + + // (String) Name of the index to create + // Name of the index to create + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (String) Number of shard replicas. A stringified number. + // Number of shard replicas. A stringified number. + // +kubebuilder:validation:Optional + NumberOfReplicas *string `json:"numberOfReplicas,omitempty" tf:"number_of_replicas,omitempty"` + + // (String) Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + // Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + // +kubebuilder:validation:Optional + NumberOfRoutingShards *string `json:"numberOfRoutingShards,omitempty" tf:"number_of_routing_shards,omitempty"` + + // (String) Number of shards for the index. This can be set only on creation. + // Number of shards for the index. This can be set only on creation. + // +kubebuilder:validation:Optional + NumberOfShards *string `json:"numberOfShards,omitempty" tf:"number_of_shards,omitempty"` + + // 1 to disable refresh. + // How often to perform a refresh operation, which makes recent changes to the index visible to search. Can be set to `-1` to disable refresh. + // +kubebuilder:validation:Optional + RefreshInterval *string `json:"refreshInterval,omitempty" tf:"refresh_interval,omitempty"` + + // (String) + // +kubebuilder:validation:Optional + RolloverAlias *string `json:"rolloverAlias,omitempty" tf:"rollover_alias,omitempty"` + + // (String) Controls shard allocation for this index. It can be set to: all , primaries , new_primaries , none. + // Controls shard allocation for this index. It can be set to: `all` , `primaries` , `new_primaries` , `none`. 
+ // +kubebuilder:validation:Optional + RoutingAllocationEnable *string `json:"routingAllocationEnable,omitempty" tf:"routing_allocation_enable,omitempty"` + + // (String) The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + // The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + // +kubebuilder:validation:Optional + RoutingPartitionSize *string `json:"routingPartitionSize,omitempty" tf:"routing_partition_size,omitempty"` + + // (String) Enables shard rebalancing for this index. It can be set to: all, primaries , replicas , none. + // Enables shard rebalancing for this index. It can be set to: `all`, `primaries` , `replicas` , `none`. + // +kubebuilder:validation:Optional + RoutingRebalanceEnable *string `json:"routingRebalanceEnable,omitempty" tf:"routing_rebalance_enable,omitempty"` + + // (String) How long a shard can not receive a search or get request until it’s considered search idle. + // How long a shard can not receive a search or get request until it’s considered search idle. + // +kubebuilder:validation:Optional + SearchIdleAfter *string `json:"searchIdleAfter,omitempty" tf:"search_idle_after,omitempty"` + + // (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + // Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + // +kubebuilder:validation:Optional + SearchSlowlogLevel *string `json:"searchSlowlogLevel,omitempty" tf:"search_slowlog_level,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 
`2s` + // +kubebuilder:validation:Optional + SearchSlowlogThresholdFetchDebug *string `json:"searchSlowlogThresholdFetchDebug,omitempty" tf:"search_slowlog_threshold_fetch_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `5s` + // +kubebuilder:validation:Optional + SearchSlowlogThresholdFetchInfo *string `json:"searchSlowlogThresholdFetchInfo,omitempty" tf:"search_slowlog_threshold_fetch_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 500ms + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `500ms` + // +kubebuilder:validation:Optional + SearchSlowlogThresholdFetchTrace *string `json:"searchSlowlogThresholdFetchTrace,omitempty" tf:"search_slowlog_threshold_fetch_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 10s + // Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `10s` + // +kubebuilder:validation:Optional + SearchSlowlogThresholdFetchWarn *string `json:"searchSlowlogThresholdFetchWarn,omitempty" tf:"search_slowlog_threshold_fetch_warn,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 2s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 
`2s` + // +kubebuilder:validation:Optional + SearchSlowlogThresholdQueryDebug *string `json:"searchSlowlogThresholdQueryDebug,omitempty" tf:"search_slowlog_threshold_query_debug,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 5s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `5s` + // +kubebuilder:validation:Optional + SearchSlowlogThresholdQueryInfo *string `json:"searchSlowlogThresholdQueryInfo,omitempty" tf:"search_slowlog_threshold_query_info,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 500ms + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `500ms` + // +kubebuilder:validation:Optional + SearchSlowlogThresholdQueryTrace *string `json:"searchSlowlogThresholdQueryTrace,omitempty" tf:"search_slowlog_threshold_query_trace,omitempty"` + + // (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 10s + // Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `10s` + // +kubebuilder:validation:Optional + SearchSlowlogThresholdQueryWarn *string `json:"searchSlowlogThresholdQueryWarn,omitempty" tf:"search_slowlog_threshold_query_warn,omitempty"` + + // (String) Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts false, true, checksum. + // Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts `false`, `true`, `checksum`. 
+	// +kubebuilder:validation:Optional
+	ShardCheckOnStartup *string `json:"shardCheckOnStartup,omitempty" tf:"shard_check_on_startup,omitempty"`
+
+	// (String) The field to sort shards in this index by.
+	// The field to sort shards in this index by.
+	// +kubebuilder:validation:Optional
+	SortField *string `json:"sortField,omitempty" tf:"sort_field,omitempty"`
+
+	// (String) The direction to sort shards in. Accepts asc, desc.
+	// The direction to sort shards in. Accepts `asc`, `desc`.
+	// +kubebuilder:validation:Optional
+	SortOrder *string `json:"sortOrder,omitempty" tf:"sort_order,omitempty"`
+}
+
+// IndexSpec defines the desired state of Index
+type IndexSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     IndexParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider IndexInitParameters `json:"initProvider,omitempty"`
+}
+
+// IndexStatus defines the observed state of Index.
+type IndexStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        IndexObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Index is the Schema for the Indexes API. Provides an OpenSearch index resource.
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch}
+type Index struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	Spec   IndexSpec   `json:"spec"`
+	Status IndexStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// IndexList contains a list of Indexes
+type IndexList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Index `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	Index_Kind             = "Index"
+	Index_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: Index_Kind}.String()
+	Index_KindAPIVersion   = Index_Kind + "." + CRDGroupVersion.String()
+	Index_GroupVersionKind = CRDGroupVersion.WithKind(Index_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Index{}, &IndexList{})
+}
diff --git a/apis/opensearch/v1alpha1/zz_monitor_terraformed.go b/apis/opensearch/v1alpha1/zz_monitor_terraformed.go
new file mode 100755
index 0000000..450766e
--- /dev/null
+++ b/apis/opensearch/v1alpha1/zz_monitor_terraformed.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Monitor +func (mg *Monitor) GetTerraformResourceType() string { + return "opensearch_monitor" +} + +// GetConnectionDetailsMapping for this Monitor +func (tr *Monitor) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Monitor +func (tr *Monitor) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Monitor +func (tr *Monitor) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Monitor +func (tr *Monitor) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Monitor +func (tr *Monitor) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Monitor +func (tr *Monitor) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Monitor +func (tr *Monitor) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetMergedParameters of this Monitor
+func (tr *Monitor) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Monitor using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Monitor) LateInitialize(attrs []byte) (bool, error) {
+	params := &MonitorParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Monitor) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opensearch/v1alpha1/zz_monitor_types.go b/apis/opensearch/v1alpha1/zz_monitor_types.go new file mode 100755 index 0000000..f055b4f --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_monitor_types.go @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MonitorInitParameters struct { + + // (String) The monitor document + // The monitor document + Body *string `json:"body,omitempty" tf:"body,omitempty"` +} + +type MonitorObservation struct { + + // (String) The monitor document + // The monitor document + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type MonitorParameters struct { + + // (String) The monitor document + // The monitor document + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` +} + +// MonitorSpec defines the desired state of Monitor +type MonitorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorInitParameters `json:"initProvider,omitempty"` +} + +// MonitorStatus defines the observed state of Monitor. +type MonitorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Monitor is the Schema for the Monitors API. Provides an OpenSearch monitor. Please refer to the OpenSearch monitor documentation for details. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Monitor struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + Spec MonitorSpec `json:"spec"` + Status MonitorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorList contains a list of Monitors +type MonitorList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Monitor `json:"items"` +} + +// Repository type metadata. +var ( + Monitor_Kind = "Monitor" + Monitor_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Monitor_Kind}.String() + Monitor_KindAPIVersion = Monitor_Kind + "." + CRDGroupVersion.String() + Monitor_GroupVersionKind = CRDGroupVersion.WithKind(Monitor_Kind) +) + +func init() { + SchemeBuilder.Register(&Monitor{}, &MonitorList{}) +} diff --git a/apis/opensearch/v1alpha1/zz_role_terraformed.go b/apis/opensearch/v1alpha1/zz_role_terraformed.go new file mode 100755 index 0000000..0c3767f --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_role_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Role +func (mg *Role) GetTerraformResourceType() string { + return "opensearch_role" +} + +// GetConnectionDetailsMapping for this Role +func (tr *Role) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Role +func (tr *Role) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Role +func (tr *Role) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Role +func (tr *Role) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Role
+func (tr *Role) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Role
+func (tr *Role) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Role
+func (tr *Role) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Role
+func (tr *Role) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Role using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Role) LateInitialize(attrs []byte) (bool, error) { + params := &RoleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Role) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opensearch/v1alpha1/zz_role_types.go b/apis/opensearch/v1alpha1/zz_role_types.go new file mode 100755 index 0000000..f3eb7f3 --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_role_types.go @@ -0,0 +1,283 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IndexPermissionsInitParameters struct { + + // (Set of String) A list of allowed actions. + // A list of allowed actions. + // +listType=set + AllowedActions []*string `json:"allowedActions,omitempty" tf:"allowed_actions,omitempty"` + + // level security (json formatted using jsonencode). + // A selector for document-level security (json formatted using jsonencode). + DocumentLevelSecurity *string `json:"documentLevelSecurity,omitempty" tf:"document_level_security,omitempty"` + + // level security. + // A list of selectors for field-level security. + // +listType=set + FieldLevelSecurity []*string `json:"fieldLevelSecurity,omitempty" tf:"field_level_security,omitempty"` + + // (Set of String) A list of glob patterns for the index names. 
+ // A list of glob patterns for the index names. + // +listType=set + IndexPatterns []*string `json:"indexPatterns,omitempty" tf:"index_patterns,omitempty"` + + // (Set of String) A list of masked fields + // A list of masked fields + // +listType=set + MaskedFields []*string `json:"maskedFields,omitempty" tf:"masked_fields,omitempty"` +} + +type IndexPermissionsObservation struct { + + // (Set of String) A list of allowed actions. + // A list of allowed actions. + // +listType=set + AllowedActions []*string `json:"allowedActions,omitempty" tf:"allowed_actions,omitempty"` + + // level security (json formatted using jsonencode). + // A selector for document-level security (json formatted using jsonencode). + DocumentLevelSecurity *string `json:"documentLevelSecurity,omitempty" tf:"document_level_security,omitempty"` + + // level security. + // A list of selectors for field-level security. + // +listType=set + FieldLevelSecurity []*string `json:"fieldLevelSecurity,omitempty" tf:"field_level_security,omitempty"` + + // (Set of String) A list of glob patterns for the index names. + // A list of glob patterns for the index names. + // +listType=set + IndexPatterns []*string `json:"indexPatterns,omitempty" tf:"index_patterns,omitempty"` + + // (Set of String) A list of masked fields + // A list of masked fields + // +listType=set + MaskedFields []*string `json:"maskedFields,omitempty" tf:"masked_fields,omitempty"` +} + +type IndexPermissionsParameters struct { + + // (Set of String) A list of allowed actions. + // A list of allowed actions. + // +kubebuilder:validation:Optional + // +listType=set + AllowedActions []*string `json:"allowedActions,omitempty" tf:"allowed_actions,omitempty"` + + // level security (json formatted using jsonencode). + // A selector for document-level security (json formatted using jsonencode). 
+ // +kubebuilder:validation:Optional + DocumentLevelSecurity *string `json:"documentLevelSecurity,omitempty" tf:"document_level_security,omitempty"` + + // level security. + // A list of selectors for field-level security. + // +kubebuilder:validation:Optional + // +listType=set + FieldLevelSecurity []*string `json:"fieldLevelSecurity,omitempty" tf:"field_level_security,omitempty"` + + // (Set of String) A list of glob patterns for the index names. + // A list of glob patterns for the index names. + // +kubebuilder:validation:Optional + // +listType=set + IndexPatterns []*string `json:"indexPatterns,omitempty" tf:"index_patterns,omitempty"` + + // (Set of String) A list of masked fields + // A list of masked fields + // +kubebuilder:validation:Optional + // +listType=set + MaskedFields []*string `json:"maskedFields,omitempty" tf:"masked_fields,omitempty"` +} + +type RoleInitParameters struct { + + // (Set of String) A list of cluster permissions. + // A list of cluster permissions. + // +listType=set + ClusterPermissions []*string `json:"clusterPermissions,omitempty" tf:"cluster_permissions,omitempty"` + + // (String) Description of the role. + // Description of the role. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (Block Set) A configuration of index permissions (see below for nested schema) + // A configuration of index permissions + IndexPermissions []IndexPermissionsInitParameters `json:"indexPermissions,omitempty" tf:"index_permissions,omitempty"` + + // (String) The name of the security role. + // The name of the security role. 
+ RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // (Block Set) A configuration of tenant permissions (see below for nested schema) + // A configuration of tenant permissions + TenantPermissions []TenantPermissionsInitParameters `json:"tenantPermissions,omitempty" tf:"tenant_permissions,omitempty"` +} + +type RoleObservation struct { + + // (Set of String) A list of cluster permissions. + // A list of cluster permissions. + // +listType=set + ClusterPermissions []*string `json:"clusterPermissions,omitempty" tf:"cluster_permissions,omitempty"` + + // (String) Description of the role. + // Description of the role. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (Block Set) A configuration of index permissions (see below for nested schema) + // A configuration of index permissions + IndexPermissions []IndexPermissionsObservation `json:"indexPermissions,omitempty" tf:"index_permissions,omitempty"` + + // (String) The name of the security role. + // The name of the security role. + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // (Block Set) A configuration of tenant permissions (see below for nested schema) + // A configuration of tenant permissions + TenantPermissions []TenantPermissionsObservation `json:"tenantPermissions,omitempty" tf:"tenant_permissions,omitempty"` +} + +type RoleParameters struct { + + // (Set of String) A list of cluster permissions. + // A list of cluster permissions. + // +kubebuilder:validation:Optional + // +listType=set + ClusterPermissions []*string `json:"clusterPermissions,omitempty" tf:"cluster_permissions,omitempty"` + + // (String) Description of the role. + // Description of the role. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (Block Set) A configuration of index permissions (see below for nested schema) + // A configuration of index permissions + // +kubebuilder:validation:Optional + IndexPermissions []IndexPermissionsParameters `json:"indexPermissions,omitempty" tf:"index_permissions,omitempty"` + + // (String) The name of the security role. + // The name of the security role. + // +kubebuilder:validation:Optional + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // (Block Set) A configuration of tenant permissions (see below for nested schema) + // A configuration of tenant permissions + // +kubebuilder:validation:Optional + TenantPermissions []TenantPermissionsParameters `json:"tenantPermissions,omitempty" tf:"tenant_permissions,omitempty"` +} + +type TenantPermissionsInitParameters struct { + + // (Set of String) A list of allowed actions. + // A list of allowed actions. + // +listType=set + AllowedActions []*string `json:"allowedActions,omitempty" tf:"allowed_actions,omitempty"` + + // (Set of String) A list of glob patterns for the tenant names + // A list of glob patterns for the tenant names + // +listType=set + TenantPatterns []*string `json:"tenantPatterns,omitempty" tf:"tenant_patterns,omitempty"` +} + +type TenantPermissionsObservation struct { + + // (Set of String) A list of allowed actions. + // A list of allowed actions. + // +listType=set + AllowedActions []*string `json:"allowedActions,omitempty" tf:"allowed_actions,omitempty"` + + // (Set of String) A list of glob patterns for the tenant names + // A list of glob patterns for the tenant names + // +listType=set + TenantPatterns []*string `json:"tenantPatterns,omitempty" tf:"tenant_patterns,omitempty"` +} + +type TenantPermissionsParameters struct { + + // (Set of String) A list of allowed actions. + // A list of allowed actions. 
+ // +kubebuilder:validation:Optional + // +listType=set + AllowedActions []*string `json:"allowedActions,omitempty" tf:"allowed_actions,omitempty"` + + // (Set of String) A list of glob patterns for the tenant names + // A list of glob patterns for the tenant names + // +kubebuilder:validation:Optional + // +listType=set + TenantPatterns []*string `json:"tenantPatterns,omitempty" tf:"tenant_patterns,omitempty"` +} + +// RoleSpec defines the desired state of Role +type RoleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RoleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RoleInitParameters `json:"initProvider,omitempty"` +} + +// RoleStatus defines the observed state of Role. +type RoleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RoleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Role is the Schema for the Roles API. Provides an OpenSearch security role resource. Please refer to the OpenSearch Access Control documentation for details. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Role struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.roleName) || (has(self.initProvider) && has(self.initProvider.roleName))",message="spec.forProvider.roleName is a required parameter" + Spec RoleSpec `json:"spec"` + Status RoleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RoleList contains a list of Roles +type RoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Role `json:"items"` +} + +// Repository type metadata. +var ( + Role_Kind = "Role" + Role_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Role_Kind}.String() + Role_KindAPIVersion = Role_Kind + "." + CRDGroupVersion.String() + Role_GroupVersionKind = CRDGroupVersion.WithKind(Role_Kind) +) + +func init() { + SchemeBuilder.Register(&Role{}, &RoleList{}) +} diff --git a/apis/opensearch/v1alpha1/zz_script_terraformed.go b/apis/opensearch/v1alpha1/zz_script_terraformed.go new file mode 100755 index 0000000..fa7236c --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_script_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Script +func (mg *Script) GetTerraformResourceType() string { + return "opensearch_script" +} + +// GetConnectionDetailsMapping for this Script +func (tr *Script) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Script +func (tr *Script) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Script +func (tr *Script) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Script +func (tr *Script) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Script +func (tr *Script) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Script +func (tr *Script) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Script +func (tr *Script) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of 
this Script +func (tr *Script) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Script using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Script) LateInitialize(attrs []byte) (bool, error) { + params := &ScriptParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Script) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opensearch/v1alpha1/zz_script_types.go b/apis/opensearch/v1alpha1/zz_script_types.go new file mode 100755 index 0000000..e3b8db0 --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_script_types.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ScriptInitParameters struct { + + // (String) Specifies the language the script is written in. Defaults to painless. + // Specifies the language the script is written in. Defaults to painless. + Lang *string `json:"lang,omitempty" tf:"lang,omitempty"` + + // (String) Identifier for the stored script. Must be unique within the cluster. + // Identifier for the stored script. Must be unique within the cluster. + ScriptID *string `json:"scriptId,omitempty" tf:"script_id,omitempty"` + + // (String) The source of the stored script + // The source of the stored script + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type ScriptObservation struct { + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) Specifies the language the script is written in. Defaults to painless. + // Specifies the language the script is written in. Defaults to painless. + Lang *string `json:"lang,omitempty" tf:"lang,omitempty"` + + // (String) Identifier for the stored script. Must be unique within the cluster. + // Identifier for the stored script. Must be unique within the cluster. 
+ ScriptID *string `json:"scriptId,omitempty" tf:"script_id,omitempty"` + + // (String) The source of the stored script + // The source of the stored script + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type ScriptParameters struct { + + // (String) Specifies the language the script is written in. Defaults to painless. + // Specifies the language the script is written in. Defaults to painless. + // +kubebuilder:validation:Optional + Lang *string `json:"lang,omitempty" tf:"lang,omitempty"` + + // (String) Identifier for the stored script. Must be unique within the cluster. + // Identifier for the stored script. Must be unique within the cluster. + // +kubebuilder:validation:Optional + ScriptID *string `json:"scriptId,omitempty" tf:"script_id,omitempty"` + + // (String) The source of the stored script + // The source of the stored script + // +kubebuilder:validation:Optional + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +// ScriptSpec defines the desired state of Script +type ScriptSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScriptParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScriptInitParameters `json:"initProvider,omitempty"` +} + +// ScriptStatus defines the observed state of Script. 
+type ScriptStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScriptObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Script is the Schema for the Scripts API. Provides an OpenSearch script resource. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Script struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scriptId) || (has(self.initProvider) && has(self.initProvider.scriptId))",message="spec.forProvider.scriptId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.source) || (has(self.initProvider) && has(self.initProvider.source))",message="spec.forProvider.source is a required parameter" + Spec ScriptSpec `json:"spec"` + Status ScriptStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScriptList contains a list of Scripts +type ScriptList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Script `json:"items"` +} + +// Repository type metadata. 
+var ( + Script_Kind = "Script" + Script_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Script_Kind}.String() + Script_KindAPIVersion = Script_Kind + "." + CRDGroupVersion.String() + Script_GroupVersionKind = CRDGroupVersion.WithKind(Script_Kind) +) + +func init() { + SchemeBuilder.Register(&Script{}, &ScriptList{}) +} diff --git a/apis/opensearch/v1alpha1/zz_user_terraformed.go b/apis/opensearch/v1alpha1/zz_user_terraformed.go new file mode 100755 index 0000000..c909ab0 --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_user_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this User +func (mg *User) GetTerraformResourceType() string { + return "opensearch_user" +} + +// GetConnectionDetailsMapping for this User +func (tr *User) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "spec.forProvider.passwordSecretRef", "password_hash": "spec.forProvider.passwordHashSecretRef"} +} + +// GetObservation of this User +func (tr *User) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this User +func (tr *User) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this User +func (tr *User) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + 
+// GetParameters of this User +func (tr *User) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this User +func (tr *User) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this User +func (tr *User) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this User +func (tr *User) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this User using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *User) LateInitialize(attrs []byte) (bool, error) { + params := &UserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *User) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opensearch/v1alpha1/zz_user_types.go b/apis/opensearch/v1alpha1/zz_user_types.go new file mode 100755 index 0000000..af345ce --- /dev/null +++ b/apis/opensearch/v1alpha1/zz_user_types.go @@ -0,0 +1,156 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type UserInitParameters struct { + + // (Map of String) A map of arbitrary key value string pairs stored alongside of users. + // A map of arbitrary key value string pairs stored alongside of users. + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +listType=set + BackendRoles []*string `json:"backendRoles,omitempty" tf:"backend_roles,omitempty"` + + // (String) Description of the user. + // Description of the user. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (String) The name of the security user. + // The name of the security user. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type UserObservation struct { + + // (Map of String) A map of arbitrary key value string pairs stored alongside of users. + // A map of arbitrary key value string pairs stored alongside of users. + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +listType=set + BackendRoles []*string `json:"backendRoles,omitempty" tf:"backend_roles,omitempty"` + + // (String) Description of the user. + // Description of the user. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The name of the security user. + // The name of the security user. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type UserParameters struct { + + // (Map of String) A map of arbitrary key value string pairs stored alongside of users. + // A map of arbitrary key value string pairs stored alongside of users. + // +kubebuilder:validation:Optional + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +kubebuilder:validation:Optional + // +listType=set + BackendRoles []*string `json:"backendRoles,omitempty" tf:"backend_roles,omitempty"` + + // (String) Description of the user. + // Description of the user. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // hashed password for the user, cannot be specified with password. + // The pre-hashed password for the user, cannot be specified with `password`. 
+ // +kubebuilder:validation:Optional + PasswordHashSecretRef *v1.SecretKeySelector `json:"passwordHashSecretRef,omitempty" tf:"-"` + + // descriptive HTTP 400 Bad Request error. For AWS OpenSearch domains "password must be at least 8 characters long and contain at least one uppercase letter, one lowercase letter, one digit, and one special character". + // The plain text password for the user, cannot be specified with `password_hash`. Some implementations may enforce a password policy. Invalid passwords may cause a non-descriptive HTTP 400 Bad Request error. For AWS OpenSearch domains "password must be at least 8 characters long and contain at least one uppercase letter, one lowercase letter, one digit, and one special character". + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // (String) The name of the security user. + // The name of the security user. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +// UserSpec defines the desired state of User +type UserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider UserInitParameters `json:"initProvider,omitempty"` +} + +// UserStatus defines the observed state of User. +type UserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// User is the Schema for the Users API. Provides an OpenSearch security user. Please refer to the OpenSearch Access Control documentation for details. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type User struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.username) || (has(self.initProvider) && has(self.initProvider.username))",message="spec.forProvider.username is a required parameter" + Spec UserSpec `json:"spec"` + Status UserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserList contains a list of Users +type UserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []User `json:"items"` +} + +// Repository type metadata. +var ( + User_Kind = "User" + User_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: User_Kind}.String() + User_KindAPIVersion = User_Kind + "." 
+ CRDGroupVersion.String() + User_GroupVersionKind = CRDGroupVersion.WithKind(User_Kind) +) + +func init() { + SchemeBuilder.Register(&User{}, &UserList{}) +} diff --git a/apis/roles/v1alpha1/zz_generated.conversion_hubs.go b/apis/roles/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..2abc807 --- /dev/null +++ b/apis/roles/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Mapping) Hub() {} diff --git a/apis/roles/v1alpha1/zz_generated.deepcopy.go b/apis/roles/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..5793bb5 --- /dev/null +++ b/apis/roles/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,360 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mapping) DeepCopyInto(out *Mapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mapping. +func (in *Mapping) DeepCopy() *Mapping { + if in == nil { + return nil + } + out := new(Mapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Mapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingInitParameters) DeepCopyInto(out *MappingInitParameters) { + *out = *in + if in.AndBackendRoles != nil { + in, out := &in.AndBackendRoles, &out.AndBackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackendRoles != nil { + in, out := &in.BackendRoles, &out.BackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.RoleNameRef != nil { + in, out := &in.RoleNameRef, &out.RoleNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleNameSelector != nil { + in, out := &in.RoleNameSelector, &out.RoleNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UsersRefs != nil { + in, out := &in.UsersRefs, &out.UsersRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UsersSelector != nil { + in, out := 
&in.UsersSelector, &out.UsersSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingInitParameters. +func (in *MappingInitParameters) DeepCopy() *MappingInitParameters { + if in == nil { + return nil + } + out := new(MappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingList) DeepCopyInto(out *MappingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Mapping, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingList. +func (in *MappingList) DeepCopy() *MappingList { + if in == nil { + return nil + } + out := new(MappingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MappingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingObservation) DeepCopyInto(out *MappingObservation) { + *out = *in + if in.AndBackendRoles != nil { + in, out := &in.AndBackendRoles, &out.AndBackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackendRoles != nil { + in, out := &in.BackendRoles, &out.BackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingObservation. +func (in *MappingObservation) DeepCopy() *MappingObservation { + if in == nil { + return nil + } + out := new(MappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingParameters) DeepCopyInto(out *MappingParameters) { + *out = *in + if in.AndBackendRoles != nil { + in, out := &in.AndBackendRoles, &out.AndBackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackendRoles != nil { + in, out := &in.BackendRoles, &out.BackendRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.RoleNameRef != nil { + in, out := &in.RoleNameRef, &out.RoleNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleNameSelector != nil { + in, out := &in.RoleNameSelector, &out.RoleNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UsersRefs != nil { + in, out := &in.UsersRefs, &out.UsersRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UsersSelector != nil { + in, out := &in.UsersSelector, &out.UsersSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParameters. 
+func (in *MappingParameters) DeepCopy() *MappingParameters { + if in == nil { + return nil + } + out := new(MappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingSpec) DeepCopyInto(out *MappingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingSpec. +func (in *MappingSpec) DeepCopy() *MappingSpec { + if in == nil { + return nil + } + out := new(MappingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingStatus) DeepCopyInto(out *MappingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingStatus. +func (in *MappingStatus) DeepCopy() *MappingStatus { + if in == nil { + return nil + } + out := new(MappingStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/roles/v1alpha1/zz_generated.managed.go b/apis/roles/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..f8e4418 --- /dev/null +++ b/apis/roles/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Mapping. +func (mg *Mapping) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Mapping. 
+func (mg *Mapping) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Mapping. +func (mg *Mapping) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Mapping. +func (mg *Mapping) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Mapping. +func (mg *Mapping) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Mapping. +func (mg *Mapping) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Mapping. +func (mg *Mapping) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Mapping. +func (mg *Mapping) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Mapping. +func (mg *Mapping) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Mapping. +func (mg *Mapping) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Mapping. +func (mg *Mapping) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Mapping. 
+func (mg *Mapping) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/roles/v1alpha1/zz_generated.managedlist.go b/apis/roles/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..00a53f1 --- /dev/null +++ b/apis/roles/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this MappingList. +func (l *MappingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/roles/v1alpha1/zz_generated.resolvers.go b/apis/roles/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..d0eae2e --- /dev/null +++ b/apis/roles/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,86 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Mapping. 
+func (mg *Mapping) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RoleNameRef, + Selector: mg.Spec.ForProvider.RoleNameSelector, + To: reference.To{ + List: &v1alpha1.RoleList{}, + Managed: &v1alpha1.Role{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleName") + } + mg.Spec.ForProvider.RoleName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleNameRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Users), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.UsersRefs, + Selector: mg.Spec.ForProvider.UsersSelector, + To: reference.To{ + List: &v1alpha1.UserList{}, + Managed: &v1alpha1.User{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Users") + } + mg.Spec.ForProvider.Users = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.UsersRefs = mrsp.ResolvedReferences + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RoleNameRef, + Selector: mg.Spec.InitProvider.RoleNameSelector, + To: reference.To{ + List: &v1alpha1.RoleList{}, + Managed: &v1alpha1.Role{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleName") + } + mg.Spec.InitProvider.RoleName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleNameRef = rsp.ResolvedReference + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + 
CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Users), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.UsersRefs, + Selector: mg.Spec.InitProvider.UsersSelector, + To: reference.To{ + List: &v1alpha1.UserList{}, + Managed: &v1alpha1.User{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Users") + } + mg.Spec.InitProvider.Users = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.UsersRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/roles/v1alpha1/zz_groupversion_info.go b/apis/roles/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..b6b561b --- /dev/null +++ b/apis/roles/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=roles.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "roles.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/roles/v1alpha1/zz_mapping_terraformed.go b/apis/roles/v1alpha1/zz_mapping_terraformed.go new file mode 100755 index 0000000..8d00b42 --- /dev/null +++ b/apis/roles/v1alpha1/zz_mapping_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Mapping +func (mg *Mapping) GetTerraformResourceType() string { + return "opensearch_roles_mapping" +} + +// GetConnectionDetailsMapping for this Mapping +func (tr *Mapping) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Mapping +func (tr *Mapping) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Mapping +func (tr *Mapping) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Mapping +func (tr *Mapping) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Mapping +func (tr *Mapping) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Mapping +func (tr *Mapping) SetParameters(params map[string]any) error { 
+ p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Mapping +func (tr *Mapping) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Mapping +func (tr *Mapping) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Mapping using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Mapping) LateInitialize(attrs []byte) (bool, error) { + params := &MappingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Mapping) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/roles/v1alpha1/zz_mapping_types.go b/apis/roles/v1alpha1/zz_mapping_types.go new file mode 100755 index 0000000..74aa829 --- /dev/null +++ b/apis/roles/v1alpha1/zz_mapping_types.go @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MappingInitParameters struct { + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +listType=set + AndBackendRoles []*string `json:"andBackendRoles,omitempty" tf:"and_backend_roles,omitempty"` + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +listType=set + BackendRoles []*string `json:"backendRoles,omitempty" tf:"backend_roles,omitempty"` + + // (String) Description of the role mapping. + // Description of the role mapping. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (Set of String) A list of host names. + // A list of host names. + // +listType=set + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + + // (String) The name of the security role. 
+ // The name of the security role. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1.Role + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // Reference to a Role in opensearch to populate roleName. + // +kubebuilder:validation:Optional + RoleNameRef *v1.Reference `json:"roleNameRef,omitempty" tf:"-"` + + // Selector for a Role in opensearch to populate roleName. + // +kubebuilder:validation:Optional + RoleNameSelector *v1.Selector `json:"roleNameSelector,omitempty" tf:"-"` + + // (Set of String) A list of users. + // A list of users. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1.User + // +listType=set + Users []*string `json:"users,omitempty" tf:"users,omitempty"` + + // References to User in opensearch to populate users. + // +kubebuilder:validation:Optional + UsersRefs []v1.Reference `json:"usersRefs,omitempty" tf:"-"` + + // Selector for a list of User in opensearch to populate users. + // +kubebuilder:validation:Optional + UsersSelector *v1.Selector `json:"usersSelector,omitempty" tf:"-"` +} + +type MappingObservation struct { + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +listType=set + AndBackendRoles []*string `json:"andBackendRoles,omitempty" tf:"and_backend_roles,omitempty"` + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +listType=set + BackendRoles []*string `json:"backendRoles,omitempty" tf:"backend_roles,omitempty"` + + // (String) Description of the role mapping. + // Description of the role mapping. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (Set of String) A list of host names. + // A list of host names. + // +listType=set + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + + // (String) The ID of this resource. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The name of the security role. + // The name of the security role. + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // (Set of String) A list of users. + // A list of users. + // +listType=set + Users []*string `json:"users,omitempty" tf:"users,omitempty"` +} + +type MappingParameters struct { + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +kubebuilder:validation:Optional + // +listType=set + AndBackendRoles []*string `json:"andBackendRoles,omitempty" tf:"and_backend_roles,omitempty"` + + // (Set of String) A list of backend roles. + // A list of backend roles. + // +kubebuilder:validation:Optional + // +listType=set + BackendRoles []*string `json:"backendRoles,omitempty" tf:"backend_roles,omitempty"` + + // (String) Description of the role mapping. + // Description of the role mapping. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // (Set of String) A list of host names. + // A list of host names. + // +kubebuilder:validation:Optional + // +listType=set + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + + // (String) The name of the security role. + // The name of the security role. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1.Role + // +kubebuilder:validation:Optional + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // Reference to a Role in opensearch to populate roleName. + // +kubebuilder:validation:Optional + RoleNameRef *v1.Reference `json:"roleNameRef,omitempty" tf:"-"` + + // Selector for a Role in opensearch to populate roleName. + // +kubebuilder:validation:Optional + RoleNameSelector *v1.Selector `json:"roleNameSelector,omitempty" tf:"-"` + + // (Set of String) A list of users. + // A list of users. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1.User + // +kubebuilder:validation:Optional + // +listType=set + Users []*string `json:"users,omitempty" tf:"users,omitempty"` + + // References to User in opensearch to populate users. + // +kubebuilder:validation:Optional + UsersRefs []v1.Reference `json:"usersRefs,omitempty" tf:"-"` + + // Selector for a list of User in opensearch to populate users. + // +kubebuilder:validation:Optional + UsersSelector *v1.Selector `json:"usersSelector,omitempty" tf:"-"` +} + +// MappingSpec defines the desired state of Mapping +type MappingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MappingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MappingInitParameters `json:"initProvider,omitempty"` +} + +// MappingStatus defines the observed state of Mapping. +type MappingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MappingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Mapping is the Schema for the Mappings API. Provides an OpenSearch security role mapping. Please refer to the OpenSearch Access Control documentation for details. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Mapping struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MappingSpec `json:"spec"` + Status MappingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MappingList contains a list of Mappings +type MappingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Mapping `json:"items"` +} + +// Repository type metadata. +var ( + Mapping_Kind = "Mapping" + Mapping_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Mapping_Kind}.String() + Mapping_KindAPIVersion = Mapping_Kind + "." + CRDGroupVersion.String() + Mapping_GroupVersionKind = CRDGroupVersion.WithKind(Mapping_Kind) +) + +func init() { + SchemeBuilder.Register(&Mapping{}, &MappingList{}) +} diff --git a/apis/sm/v1alpha1/zz_generated.conversion_hubs.go b/apis/sm/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..0ddd1d8 --- /dev/null +++ b/apis/sm/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. 
+func (tr *Policy) Hub() {} diff --git a/apis/sm/v1alpha1/zz_generated.deepcopy.go b/apis/sm/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..2d7634a --- /dev/null +++ b/apis/sm/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,213 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PrimaryTerm != nil { + in, out := &in.PrimaryTerm, &out.PrimaryTerm + *out = new(float64) + **out = **in + } + if in.SeqNo != nil { + in, out := &in.SeqNo, &out.SeqNo + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. 
+func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { + if in == nil { + return nil + } + out := new(PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PrimaryTerm != nil { + in, out := &in.PrimaryTerm, &out.PrimaryTerm + *out = new(float64) + **out = **in + } + if in.SeqNo != nil { + in, out := &in.SeqNo, &out.SeqNo + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. 
+func (in *PolicyObservation) DeepCopy() *PolicyObservation { + if in == nil { + return nil + } + out := new(PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PrimaryTerm != nil { + in, out := &in.PrimaryTerm, &out.PrimaryTerm + *out = new(float64) + **out = **in + } + if in.SeqNo != nil { + in, out := &in.SeqNo, &out.SeqNo + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. +func (in *PolicyParameters) DeepCopy() *PolicyParameters { + if in == nil { + return nil + } + out := new(PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. +func (in *PolicySpec) DeepCopy() *PolicySpec { + if in == nil { + return nil + } + out := new(PolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. +func (in *PolicyStatus) DeepCopy() *PolicyStatus { + if in == nil { + return nil + } + out := new(PolicyStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/sm/v1alpha1/zz_generated.managed.go b/apis/sm/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..0f27a8d --- /dev/null +++ b/apis/sm/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Policy. +func (mg *Policy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Policy. +func (mg *Policy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Policy. +func (mg *Policy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Policy. +func (mg *Policy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Policy. +func (mg *Policy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Policy. 
+func (mg *Policy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Policy. +func (mg *Policy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Policy. +func (mg *Policy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/sm/v1alpha1/zz_generated.managedlist.go b/apis/sm/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..cbc784b --- /dev/null +++ b/apis/sm/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PolicyList. +func (l *PolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/sm/v1alpha1/zz_groupversion_info.go b/apis/sm/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..ab8bdf5 --- /dev/null +++ b/apis/sm/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=sm.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "sm.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/sm/v1alpha1/zz_policy_terraformed.go b/apis/sm/v1alpha1/zz_policy_terraformed.go new file mode 100755 index 0000000..ab244d2 --- /dev/null +++ b/apis/sm/v1alpha1/zz_policy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Policy +func (mg *Policy) GetTerraformResourceType() string { + return "opensearch_sm_policy" +} + +// GetConnectionDetailsMapping for this Policy +func (tr *Policy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Policy +func (tr *Policy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Policy +func (tr *Policy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Policy +func (tr *Policy) GetID() string { + if tr.Status.AtProvider.ID == 
nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Policy
+func (tr *Policy) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Policy
+func (tr *Policy) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Policy
+func (tr *Policy) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Policy
+func (tr *Policy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Policy using its observed tfState.
+// returns True if there are any spec changes for the resource. +func (tr *Policy) LateInitialize(attrs []byte) (bool, error) { + params := &PolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Policy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sm/v1alpha1/zz_policy_types.go b/apis/sm/v1alpha1/zz_policy_types.go new file mode 100755 index 0000000..9dafd9f --- /dev/null +++ b/apis/sm/v1alpha1/zz_policy_types.go @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PolicyInitParameters struct { + + // (String) The policy document. + // The policy document. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the SM policy. + // The name of the SM policy. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // (Number) The primary term of the SM policy version. + // The primary term of the SM policy version. + PrimaryTerm *float64 `json:"primaryTerm,omitempty" tf:"primary_term,omitempty"` + + // (Number) The sequence number of the SM policy version. + // The sequence number of the SM policy version. 
+ SeqNo *float64 `json:"seqNo,omitempty" tf:"seq_no,omitempty"` +} + +type PolicyObservation struct { + + // (String) The policy document. + // The policy document. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The name of the SM policy. + // The name of the SM policy. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // (Number) The primary term of the SM policy version. + // The primary term of the SM policy version. + PrimaryTerm *float64 `json:"primaryTerm,omitempty" tf:"primary_term,omitempty"` + + // (Number) The sequence number of the SM policy version. + // The sequence number of the SM policy version. + SeqNo *float64 `json:"seqNo,omitempty" tf:"seq_no,omitempty"` +} + +type PolicyParameters struct { + + // (String) The policy document. + // The policy document. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // (String) The name of the SM policy. + // The name of the SM policy. + // +kubebuilder:validation:Optional + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // (Number) The primary term of the SM policy version. + // The primary term of the SM policy version. + // +kubebuilder:validation:Optional + PrimaryTerm *float64 `json:"primaryTerm,omitempty" tf:"primary_term,omitempty"` + + // (Number) The sequence number of the SM policy version. + // The sequence number of the SM policy version. + // +kubebuilder:validation:Optional + SeqNo *float64 `json:"seqNo,omitempty" tf:"seq_no,omitempty"` +} + +// PolicySpec defines the desired state of Policy +type PolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PolicyInitParameters `json:"initProvider,omitempty"` +} + +// PolicyStatus defines the observed state of Policy. +type PolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Policy is the Schema for the Policys API. Provides an OpenSearch Snapshot Management (SM) policy. Please refer to the OpenSearch SM documentation for details. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Policy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.body) || (has(self.initProvider) && has(self.initProvider.body))",message="spec.forProvider.body is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyName) || (has(self.initProvider) && has(self.initProvider.policyName))",message="spec.forProvider.policyName is a required parameter" + Spec PolicySpec `json:"spec"` + Status PolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PolicyList contains a list of Policys +type PolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Policy `json:"items"` +} + +// Repository type metadata. +var ( + Policy_Kind = "Policy" + Policy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Policy_Kind}.String() + Policy_KindAPIVersion = Policy_Kind + "." 
+ CRDGroupVersion.String() + Policy_GroupVersionKind = CRDGroupVersion.WithKind(Policy_Kind) +) + +func init() { + SchemeBuilder.Register(&Policy{}, &PolicyList{}) +} diff --git a/apis/snapshot/v1alpha1/zz_generated.conversion_hubs.go b/apis/snapshot/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..e29cfb5 --- /dev/null +++ b/apis/snapshot/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Repository) Hub() {} diff --git a/apis/snapshot/v1alpha1/zz_generated.deepcopy.go b/apis/snapshot/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..d8a8dd9 --- /dev/null +++ b/apis/snapshot/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,231 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Repository) DeepCopyInto(out *Repository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Repository. +func (in *Repository) DeepCopy() *Repository { + if in == nil { + return nil + } + out := new(Repository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Repository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RepositoryInitParameters) DeepCopyInto(out *RepositoryInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryInitParameters. +func (in *RepositoryInitParameters) DeepCopy() *RepositoryInitParameters { + if in == nil { + return nil + } + out := new(RepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryList) DeepCopyInto(out *RepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Repository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryList. +func (in *RepositoryList) DeepCopy() *RepositoryList { + if in == nil { + return nil + } + out := new(RepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RepositoryObservation) DeepCopyInto(out *RepositoryObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryObservation. +func (in *RepositoryObservation) DeepCopy() *RepositoryObservation { + if in == nil { + return nil + } + out := new(RepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryParameters) DeepCopyInto(out *RepositoryParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryParameters. 
+func (in *RepositoryParameters) DeepCopy() *RepositoryParameters { + if in == nil { + return nil + } + out := new(RepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositorySpec) DeepCopyInto(out *RepositorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositorySpec. +func (in *RepositorySpec) DeepCopy() *RepositorySpec { + if in == nil { + return nil + } + out := new(RepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryStatus) DeepCopyInto(out *RepositoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryStatus. +func (in *RepositoryStatus) DeepCopy() *RepositoryStatus { + if in == nil { + return nil + } + out := new(RepositoryStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/snapshot/v1alpha1/zz_generated.managed.go b/apis/snapshot/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..0e9345a --- /dev/null +++ b/apis/snapshot/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Repository. +func (mg *Repository) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Repository. 
+func (mg *Repository) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Repository. +func (mg *Repository) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Repository. +func (mg *Repository) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Repository. +func (mg *Repository) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Repository. +func (mg *Repository) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Repository. +func (mg *Repository) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Repository. +func (mg *Repository) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Repository. +func (mg *Repository) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Repository. +func (mg *Repository) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Repository. +func (mg *Repository) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Repository. 
+func (mg *Repository) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/snapshot/v1alpha1/zz_generated.managedlist.go b/apis/snapshot/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..29d2dc8 --- /dev/null +++ b/apis/snapshot/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this RepositoryList. +func (l *RepositoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/snapshot/v1alpha1/zz_groupversion_info.go b/apis/snapshot/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..fa6027f --- /dev/null +++ b/apis/snapshot/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=snapshot.opensearch.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "snapshot.opensearch.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/snapshot/v1alpha1/zz_repository_terraformed.go b/apis/snapshot/v1alpha1/zz_repository_terraformed.go new file mode 100755 index 0000000..59b5a97 --- /dev/null +++ b/apis/snapshot/v1alpha1/zz_repository_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Repository +func (mg *Repository) GetTerraformResourceType() string { + return "opensearch_snapshot_repository" +} + +// GetConnectionDetailsMapping for this Repository +func (tr *Repository) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Repository +func (tr *Repository) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Repository +func (tr *Repository) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Repository +func (tr *Repository) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Repository +func (tr *Repository) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Repository 
+func (tr *Repository) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Repository +func (tr *Repository) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Repository +func (tr *Repository) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Repository using its observed tfState. +// returns True if there are any spec changes for the resource.
+func (tr *Repository) LateInitialize(attrs []byte) (bool, error) { + params := &RepositoryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Repository) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/snapshot/v1alpha1/zz_repository_types.go b/apis/snapshot/v1alpha1/zz_repository_types.go new file mode 100755 index 0000000..4403ac3 --- /dev/null +++ b/apis/snapshot/v1alpha1/zz_repository_types.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RepositoryInitParameters struct { + + // (String) The name of the repository. + // The name of the repository. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (Map of String) The settings map applicable for the backend, see official documentation for plugins. + // The settings map applicable for the backend, see official documentation for plugins. + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` + + // (String) The name of the repository backend . + // The name of the repository backend (required plugins must be installed). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RepositoryObservation struct { + + // (String) The ID of this resource. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) The name of the repository. + // The name of the repository. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (Map of String) The settings map applicable for the backend, see official documentation for plugins. + // The settings map applicable for the backend, see official documentation for plugins. + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` + + // (String) The name of the repository backend . + // The name of the repository backend (required plugins must be installed). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RepositoryParameters struct { + + // (String) The name of the repository. + // The name of the repository. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (Map of String) The settings map applicable for the backend, see official documentation for plugins. + // The settings map applicable for the backend, see official documentation for plugins. + // +kubebuilder:validation:Optional + // +mapType=granular + Settings map[string]*string `json:"settings,omitempty" tf:"settings,omitempty"` + + // (String) The name of the repository backend . + // The name of the repository backend (required plugins must be installed). + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// RepositorySpec defines the desired state of Repository +type RepositorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RepositoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RepositoryInitParameters `json:"initProvider,omitempty"` +} + +// RepositoryStatus defines the observed state of Repository. +type RepositoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RepositoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Repository is the Schema for the Repositorys API. Provides an OpenSearch snapshot repository resource. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,opensearch} +type Repository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && 
has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec RepositorySpec `json:"spec"` + Status RepositoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RepositoryList contains a list of Repositorys +type RepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Repository `json:"items"` +} + +// Repository type metadata. +var ( + Repository_Kind = "Repository" + Repository_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Repository_Kind}.String() + Repository_KindAPIVersion = Repository_Kind + "." + CRDGroupVersion.String() + Repository_GroupVersionKind = CRDGroupVersion.WithKind(Repository_Kind) +) + +func init() { + SchemeBuilder.Register(&Repository{}, &RepositoryList{}) +} diff --git a/apis/zz_register.go b/apis/zz_register.go index d90320c..1e39f14 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -10,7 +10,22 @@ package apis import ( "k8s.io/apimachinery/pkg/runtime" - v1alpha1 "github.com/tagesjump/provider-opensearch/apis/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/anomaly/v1alpha1" + v1alpha1audit "github.com/tagesjump/provider-opensearch/apis/audit/v1alpha1" + v1alpha1channel "github.com/tagesjump/provider-opensearch/apis/channel/v1alpha1" + v1alpha1cluster "github.com/tagesjump/provider-opensearch/apis/cluster/v1alpha1" + v1alpha1component "github.com/tagesjump/provider-opensearch/apis/component/v1alpha1" + v1alpha1composable "github.com/tagesjump/provider-opensearch/apis/composable/v1alpha1" + v1alpha1dashboard "github.com/tagesjump/provider-opensearch/apis/dashboard/v1alpha1" + v1alpha1data "github.com/tagesjump/provider-opensearch/apis/data/v1alpha1" + v1alpha1index "github.com/tagesjump/provider-opensearch/apis/index/v1alpha1" + v1alpha1ingest "github.com/tagesjump/provider-opensearch/apis/ingest/v1alpha1" + v1alpha1ism "github.com/tagesjump/provider-opensearch/apis/ism/v1alpha1" + 
v1alpha1opensearch "github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1" + v1alpha1roles "github.com/tagesjump/provider-opensearch/apis/roles/v1alpha1" + v1alpha1sm "github.com/tagesjump/provider-opensearch/apis/sm/v1alpha1" + v1alpha1snapshot "github.com/tagesjump/provider-opensearch/apis/snapshot/v1alpha1" + v1alpha1apis "github.com/tagesjump/provider-opensearch/apis/v1alpha1" v1beta1 "github.com/tagesjump/provider-opensearch/apis/v1beta1" ) @@ -18,6 +33,21 @@ func init() { // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme, + v1alpha1audit.SchemeBuilder.AddToScheme, + v1alpha1channel.SchemeBuilder.AddToScheme, + v1alpha1cluster.SchemeBuilder.AddToScheme, + v1alpha1component.SchemeBuilder.AddToScheme, + v1alpha1composable.SchemeBuilder.AddToScheme, + v1alpha1dashboard.SchemeBuilder.AddToScheme, + v1alpha1data.SchemeBuilder.AddToScheme, + v1alpha1index.SchemeBuilder.AddToScheme, + v1alpha1ingest.SchemeBuilder.AddToScheme, + v1alpha1ism.SchemeBuilder.AddToScheme, + v1alpha1opensearch.SchemeBuilder.AddToScheme, + v1alpha1roles.SchemeBuilder.AddToScheme, + v1alpha1sm.SchemeBuilder.AddToScheme, + v1alpha1snapshot.SchemeBuilder.AddToScheme, + v1alpha1apis.SchemeBuilder.AddToScheme, v1beta1.SchemeBuilder.AddToScheme, ) } diff --git a/config/external_name.go b/config/external_name.go index 2c2db11..733f43c 100644 --- a/config/external_name.go +++ b/config/external_name.go @@ -8,6 +8,27 @@ import ( // provider. 
var ExternalNameConfigs = map[string]config.ExternalName{ // Import requires using a randomly generated ID from provider: nl-2e21sda + "opensearch_anomaly_detection": config.IdentifierFromProvider, + "opensearch_audit_config": config.IdentifierFromProvider, + "opensearch_channel_configuration": config.IdentifierFromProvider, + "opensearch_cluster_settings": config.IdentifierFromProvider, + "opensearch_component_template": config.IdentifierFromProvider, + "opensearch_composable_index_template": config.IdentifierFromProvider, + "opensearch_dashboard_object": config.IdentifierFromProvider, + "opensearch_dashboard_tenant": config.IdentifierFromProvider, + "opensearch_data_stream": config.IdentifierFromProvider, + "opensearch_index": config.IdentifierFromProvider, + "opensearch_index_template": config.IdentifierFromProvider, + "opensearch_ingest_pipeline": config.IdentifierFromProvider, + "opensearch_ism_policy": config.IdentifierFromProvider, + "opensearch_ism_policy_mapping": config.IdentifierFromProvider, + "opensearch_monitor": config.IdentifierFromProvider, + "opensearch_role": config.IdentifierFromProvider, + "opensearch_roles_mapping": config.IdentifierFromProvider, + "opensearch_script": config.IdentifierFromProvider, + "opensearch_sm_policy": config.IdentifierFromProvider, + "opensearch_snapshot_repository": config.IdentifierFromProvider, + "opensearch_user": config.IdentifierFromProvider, } // ExternalNameConfigurations applies all external name configs listed in the diff --git a/config/opensearch/config.go b/config/opensearch/config.go new file mode 100644 index 0000000..3a47ddd --- /dev/null +++ b/config/opensearch/config.go @@ -0,0 +1,15 @@ +package opensearch + +import ( + "github.com/crossplane/upjet/pkg/config" +) + +const ( + // ApisPackagePath is the golang path for this package. + ApisPackagePath = "github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1" + // ConfigPath is the golang path for this package. 
+ ConfigPath = "github.com/tagesjump/provider-opensearch/config/opensearch" +) + +// Configure adds configurations for the opensearch group. +func Configure(p *config.Provider) {} diff --git a/config/provider.go b/config/provider.go index c81b2d8..4afeba4 100644 --- a/config/provider.go +++ b/config/provider.go @@ -5,6 +5,7 @@ import ( _ "embed" ujconfig "github.com/crossplane/upjet/pkg/config" + "github.com/tagesjump/provider-opensearch/config/roles" ) const ( @@ -29,7 +30,7 @@ func GetProvider() *ujconfig.Provider { )) for _, configure := range []func(provider *ujconfig.Provider){ - // add custom config functions + roles.Configure, } { configure(pc) } diff --git a/config/roles/config.go b/config/roles/config.go new file mode 100644 index 0000000..fc3082b --- /dev/null +++ b/config/roles/config.go @@ -0,0 +1,28 @@ +package roles + +import ( + "fmt" + + "github.com/crossplane/upjet/pkg/config" + "github.com/tagesjump/provider-opensearch/config/opensearch" +) + +const ( + // ApisPackagePath is the golang path for this package. + ApisPackagePath = "github.com/tagesjump/provider-opensearch/apis/roles/v1alpha1" + // ConfigPath is the golang path for this package. + ConfigPath = "github.com/tagesjump/provider-opensearch/config/roles" +) + +// Configure adds configurations for the roles group. 
+func Configure(p *config.Provider) { + p.AddResourceConfigurator("opensearch_roles_mapping", func(r *config.Resource) { + r.References["role_name"] = config.Reference{ + Type: fmt.Sprintf("%s.%s", opensearch.ApisPackagePath, "Role"), + } + r.References["users"] = config.Reference{ + Type: fmt.Sprintf("%s.%s", opensearch.ApisPackagePath, "User"), + } + }) + +} diff --git a/examples-generated/anomaly/v1alpha1/detection.yaml b/examples-generated/anomaly/v1alpha1/detection.yaml new file mode 100644 index 0000000..6b981d6 --- /dev/null +++ b/examples-generated/anomaly/v1alpha1/detection.yaml @@ -0,0 +1,60 @@ +apiVersion: anomaly.opensearch.upbound.io/v1alpha1 +kind: Detection +metadata: + annotations: + meta.upbound.io/example-id: anomaly/v1alpha1/detection + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + body: | + { + "name": "foo", + "description": "Test detector", + "time_field": "@timestamp", + "indices": [ + "security-auditlog*" + ], + "feature_attributes": [ + { + "feature_name": "test", + "feature_enabled": true, + "aggregation_query": { + "test": { + "value_count": { + "field": "audit_category.keyword" + } + } + } + } + ], + "filter_query": { + "bool": { + "filter": [ + { + "range": { + "value": { + "gt": 1 + } + } + } + ], + "adjust_pure_negative": true, + "boost": 1 + } + }, + "detection_interval": { + "period": { + "interval": 1, + "unit": "Minutes" + } + }, + "window_delay": { + "period": { + "interval": 1, + "unit": "Minutes" + } + }, + "result_index" : "opensearch-ad-plugin-result-test" + } diff --git a/examples-generated/audit/v1alpha1/config.yaml b/examples-generated/audit/v1alpha1/config.yaml new file mode 100644 index 0000000..c1f7e30 --- /dev/null +++ b/examples-generated/audit/v1alpha1/config.yaml @@ -0,0 +1,54 @@ +apiVersion: audit.opensearch.upbound.io/v1alpha1 +kind: Config +metadata: + annotations: + meta.upbound.io/example-id: audit/v1alpha1/config + labels: + testing.upbound.io/example-name: test + name: 
test +spec: + forProvider: + audit: + - disabledRestCategories: + - GRANTED_PRIVILEGES + - AUTHENTICATED + disabledTransportCategories: + - GRANTED_PRIVILEGES + - AUTHENTICATED + enableRest: true + enableTransport: true + excludeSensitiveHeaders: true + ignoreRequests: + - SearchRequest + - indices:data/read/* + - /_cluster/health + ignoreUsers: + - dashboardserver + logRequestBody: true + resolveBulkRequests: true + resolveIndices: true + compliance: + - enabled: true + externalConfig: false + internalConfig: true + readIgnoreUsers: + - read-ignore-1 + readMetadataOnly: true + readWatchedField: + - fields: + - field-1 + - field-2 + index: read-index-1 + - fields: + - field-3 + index: read-index-2 + writeIgnoreUsers: + - write-ignore-1 + writeLogDiffs: false + writeMetadataOnly: true + writeWatchedIndices: + - write-index-1 + - write-index-2 + - log-* + - '*' + enabled: true diff --git a/examples-generated/cluster/v1alpha1/settings.yaml b/examples-generated/cluster/v1alpha1/settings.yaml new file mode 100644 index 0000000..f4a1260 --- /dev/null +++ b/examples-generated/cluster/v1alpha1/settings.yaml @@ -0,0 +1,12 @@ +apiVersion: cluster.opensearch.upbound.io/v1alpha1 +kind: Settings +metadata: + annotations: + meta.upbound.io/example-id: cluster/v1alpha1/settings + labels: + testing.upbound.io/example-name: global + name: global +spec: + forProvider: + actionAutoCreateIndex: my-index-000001,index10,-index1*,+ind* + clusterMaxShardsPerNode: 10 diff --git a/examples-generated/component/v1alpha1/template.yaml b/examples-generated/component/v1alpha1/template.yaml new file mode 100644 index 0000000..6992abd --- /dev/null +++ b/examples-generated/component/v1alpha1/template.yaml @@ -0,0 +1,35 @@ +apiVersion: component.opensearch.upbound.io/v1alpha1 +kind: Template +metadata: + annotations: + meta.upbound.io/example-id: component/v1alpha1/template + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + body: | + { + "template": { + 
"settings": { + "index": { + "number_of_shards": 1 + } + }, + "mappings": { + "properties": { + "host_name": { + "type": "keyword" + }, + "created_at": { + "type": "date", + "format": "EEE MMM dd HH:mm:ss Z yyyy" + } + } + }, + "aliases": { + "mydata": { } + } + } + } + name: terraform-test diff --git a/examples-generated/composable/v1alpha1/indextemplate.yaml b/examples-generated/composable/v1alpha1/indextemplate.yaml new file mode 100644 index 0000000..3c02e6c --- /dev/null +++ b/examples-generated/composable/v1alpha1/indextemplate.yaml @@ -0,0 +1,38 @@ +apiVersion: composable.opensearch.upbound.io/v1alpha1 +kind: IndexTemplate +metadata: + annotations: + meta.upbound.io/example-id: composable/v1alpha1/indextemplate + labels: + testing.upbound.io/example-name: template_1 + name: template-1 +spec: + forProvider: + body: | + { + "index_patterns": ["te*", "bar*"], + "template": { + "settings": { + "index": { + "number_of_shards": 1 + } + }, + "mappings": { + "properties": { + "host_name": { + "type": "keyword" + }, + "created_at": { + "type": "date", + "format": "EEE MMM dd HH:mm:ss Z yyyy" + } + } + }, + "aliases": { + "mydata": { } + } + }, + "priority": 200, + "version": 3 + } + name: template_1 diff --git a/examples-generated/dashboard/v1alpha1/object.yaml b/examples-generated/dashboard/v1alpha1/object.yaml new file mode 100644 index 0000000..04e3918 --- /dev/null +++ b/examples-generated/dashboard/v1alpha1/object.yaml @@ -0,0 +1,27 @@ +apiVersion: dashboard.opensearch.upbound.io/v1alpha1 +kind: Object +metadata: + annotations: + meta.upbound.io/example-id: dashboard/v1alpha1/object + labels: + testing.upbound.io/example-name: test_visualization_v6 + name: test-visualization-v6 +spec: + forProvider: + body: | + [ + { + "_id": "visualization:response-time-percentile", + "_type": "doc", + "_source": { + "type": "visualization", + "visualization": { + "title": "Total response time percentiles", + "visState": "{\"title\":\"Total response time 
percentiles\",\"type\":\"line\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"showCircles\":true,\"interpolate\":\"linear\",\"scale\":\"linear\",\"drawLinesBetweenPoints\":true,\"radiusRatio\":9,\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"percentiles\",\"schema\":\"metric\",\"params\":{\"field\":\"app.total_time\",\"percents\":[50,90,95]}},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"schema\":\"segment\",\"params\":{\"field\":\"@timestamp\",\"interval\":\"auto\",\"customInterval\":\"2h\",\"min_doc_count\":1,\"extended_bounds\":{}}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"system.syslog.program\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"_term\"}}],\"listeners\":{}}", + "uiStateJSON": "{}", + "description": "", + "version": 1 + } + } + } + ] diff --git a/examples-generated/dashboard/v1alpha1/tenant.yaml b/examples-generated/dashboard/v1alpha1/tenant.yaml new file mode 100644 index 0000000..4a32377 --- /dev/null +++ b/examples-generated/dashboard/v1alpha1/tenant.yaml @@ -0,0 +1,12 @@ +apiVersion: dashboard.opensearch.upbound.io/v1alpha1 +kind: Tenant +metadata: + annotations: + meta.upbound.io/example-id: dashboard/v1alpha1/tenant + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + description: test tenant + tenantName: test diff --git a/examples-generated/data/v1alpha1/stream.yaml b/examples-generated/data/v1alpha1/stream.yaml new file mode 100644 index 0000000..e8aedbf --- /dev/null +++ b/examples-generated/data/v1alpha1/stream.yaml @@ -0,0 +1,30 @@ +apiVersion: data.opensearch.upbound.io/v1alpha1 +kind: Stream +metadata: + annotations: + meta.upbound.io/example-id: data/v1alpha1/stream + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + name: foo-data-stream + +--- + +apiVersion: 
composable.opensearch.upbound.io/v1alpha1 +kind: IndexTemplate +metadata: + annotations: + meta.upbound.io/example-id: data/v1alpha1/stream + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + body: | + { + "index_patterns": ["foo-data-stream*"], + "data_stream": {} + } + name: foo-template diff --git a/examples-generated/index/v1alpha1/template.yaml b/examples-generated/index/v1alpha1/template.yaml new file mode 100644 index 0000000..b98b426 --- /dev/null +++ b/examples-generated/index/v1alpha1/template.yaml @@ -0,0 +1,34 @@ +apiVersion: index.opensearch.upbound.io/v1alpha1 +kind: Template +metadata: + annotations: + meta.upbound.io/example-id: index/v1alpha1/template + labels: + testing.upbound.io/example-name: template_1 + name: template-1 +spec: + forProvider: + body: | + { + "template": "te*", + "settings": { + "number_of_shards": 1 + }, + "mappings": { + "type1": { + "_source": { + "enabled": false + }, + "properties": { + "host_name": { + "type": "keyword" + }, + "created_at": { + "type": "date", + "format": "EEE MMM dd HH:mm:ss Z YYYY" + } + } + } + } + } + name: template_1 diff --git a/examples-generated/ingest/v1alpha1/pipeline.yaml b/examples-generated/ingest/v1alpha1/pipeline.yaml new file mode 100644 index 0000000..7397858 --- /dev/null +++ b/examples-generated/ingest/v1alpha1/pipeline.yaml @@ -0,0 +1,24 @@ +apiVersion: ingest.opensearch.upbound.io/v1alpha1 +kind: Pipeline +metadata: + annotations: + meta.upbound.io/example-id: ingest/v1alpha1/pipeline + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + body: | + { + "description" : "describe pipeline", + "version": 123, + "processors" : [ + { + "set" : { + "field": "foo", + "value": "bar" + } + } + ] + } + name: terraform-test diff --git a/examples-generated/ism/v1alpha1/policy.yaml b/examples-generated/ism/v1alpha1/policy.yaml new file mode 100644 index 0000000..ec45358 --- /dev/null +++ b/examples-generated/ism/v1alpha1/policy.yaml 
@@ -0,0 +1,12 @@ +apiVersion: ism.opensearch.upbound.io/v1alpha1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: ism/v1alpha1/policy + labels: + testing.upbound.io/example-name: cleanup + name: cleanup +spec: + forProvider: + body: ${file("${path.module}/policies/delete_after_15d.json")} + policyId: delete_after_15d diff --git a/examples-generated/ism/v1alpha1/policymapping.yaml b/examples-generated/ism/v1alpha1/policymapping.yaml new file mode 100644 index 0000000..cb7f0e8 --- /dev/null +++ b/examples-generated/ism/v1alpha1/policymapping.yaml @@ -0,0 +1,13 @@ +apiVersion: ism.opensearch.upbound.io/v1alpha1 +kind: PolicyMapping +metadata: + annotations: + meta.upbound.io/example-id: ism/v1alpha1/policymapping + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + indexes: test_index + policyId: policy_1 + state: delete diff --git a/examples-generated/opensearch/v1alpha1/index.yaml b/examples-generated/opensearch/v1alpha1/index.yaml new file mode 100644 index 0000000..40df291 --- /dev/null +++ b/examples-generated/opensearch/v1alpha1/index.yaml @@ -0,0 +1,26 @@ +apiVersion: opensearch.opensearch.upbound.io/v1alpha1 +kind: Index +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1alpha1/index + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + mappings: | + { + "people": { + "_all": { + "enabled": false + }, + "properties": { + "email": { + "type": "text" + } + } + } + } + name: terraform-test + numberOfReplicas: 1 + numberOfShards: 1 diff --git a/examples-generated/opensearch/v1alpha1/monitor.yaml b/examples-generated/opensearch/v1alpha1/monitor.yaml new file mode 100644 index 0000000..ca89205 --- /dev/null +++ b/examples-generated/opensearch/v1alpha1/monitor.yaml @@ -0,0 +1,76 @@ +apiVersion: opensearch.opensearch.upbound.io/v1alpha1 +kind: Monitor +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1alpha1/monitor + labels: + 
testing.upbound.io/example-name: movies_last_hour + name: movies-last-hour +spec: + forProvider: + body: | + { + "name": "test-monitor", + "type": "monitor", + "enabled": true, + "schedule": { + "period": { + "interval": 1, + "unit": "MINUTES" + } + }, + "inputs": [{ + "search": { + "indices": ["movies"], + "query": { + "size": 0, + "aggregations": {}, + "query": { + "bool": { + "adjust_pure_negative":true, + "boost":1, + "filter": [{ + "range": { + "@timestamp": { + "boost":1, + "from":"||-1h", + "to":"", + "include_lower":true, + "include_upper":true, + "format": "epoch_millis" + } + } + }] + } + } + } + } + }], + "triggers": [ + { + "name" : "Errors", + "severity" : "1", + "condition" : { + "script" : { + "source" : "ctx.results[0].hits.total.value > 0", + "lang" : "painless" + } + }, + "actions" : [ + { + "name" : "Slack", + "destination_id" : "${opensearch_destination.slack_on_call_channel.id}", + "message_template" : { + "source" : "bogus", + "lang" : "mustache" + }, + "throttle_enabled" : false, + "subject_template" : { + "source" : "Production Errors", + "lang" : "mustache" + } + } + ] + } + ] + } diff --git a/examples-generated/opensearch/v1alpha1/role.yaml b/examples-generated/opensearch/v1alpha1/role.yaml new file mode 100644 index 0000000..b9cef10 --- /dev/null +++ b/examples-generated/opensearch/v1alpha1/role.yaml @@ -0,0 +1,24 @@ +apiVersion: opensearch.opensearch.upbound.io/v1alpha1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1alpha1/role + labels: + testing.upbound.io/example-name: writer + name: writer +spec: + forProvider: + clusterPermissions: + - '*' + description: Logs writer role + indexPermissions: + - allowedActions: + - write + indexPatterns: + - logstash-* + roleName: logs_writer + tenantPermissions: + - allowedActions: + - write + tenantPatterns: + - logstash-* diff --git a/examples-generated/opensearch/v1alpha1/script.yaml b/examples-generated/opensearch/v1alpha1/script.yaml new file mode 100644 index 
0000000..30b9397 --- /dev/null +++ b/examples-generated/opensearch/v1alpha1/script.yaml @@ -0,0 +1,13 @@ +apiVersion: opensearch.opensearch.upbound.io/v1alpha1 +kind: Script +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1alpha1/script + labels: + testing.upbound.io/example-name: test_script + name: test-script +spec: + forProvider: + lang: painless + scriptId: my_script + source: Math.log(_score * 2) + params.my_modifier diff --git a/examples-generated/opensearch/v1alpha1/user.yaml b/examples-generated/opensearch/v1alpha1/user.yaml new file mode 100644 index 0000000..033bc4d --- /dev/null +++ b/examples-generated/opensearch/v1alpha1/user.yaml @@ -0,0 +1,57 @@ +apiVersion: opensearch.opensearch.upbound.io/v1alpha1 +kind: User +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1alpha1/user + labels: + testing.upbound.io/example-name: mapper + name: mapper +spec: + forProvider: + description: a reader role for our app + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: app-reasdder + +--- + +apiVersion: opensearch.opensearch.upbound.io/v1alpha1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1alpha1/user + labels: + testing.upbound.io/example-name: reader + name: reader +spec: + forProvider: + description: App Reader Role + indexPermissions: + - allowedActions: + - get + - read + - search + indexPatterns: + - app-* + roleName: app_reader + +--- + +apiVersion: roles.opensearch.upbound.io/v1alpha1 +kind: Mapping +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1alpha1/user + labels: + testing.upbound.io/example-name: reader + name: reader +spec: + forProvider: + description: App Reader Role + roleNameSelector: + matchLabels: + testing.upbound.io/example-name: reader + usersRefs: + - name: reader diff --git a/examples-generated/roles/v1alpha1/mapping.yaml b/examples-generated/roles/v1alpha1/mapping.yaml new file mode 100644 index 
0000000..f54df04 --- /dev/null +++ b/examples-generated/roles/v1alpha1/mapping.yaml @@ -0,0 +1,17 @@ +apiVersion: roles.opensearch.upbound.io/v1alpha1 +kind: Mapping +metadata: + annotations: + meta.upbound.io/example-id: roles/v1alpha1/mapping + labels: + testing.upbound.io/example-name: mapper + name: mapper +spec: + forProvider: + backendRoles: + - arn:aws:iam::123456789012:role/lambda-call-opensearch + - arn:aws:iam::123456789012:role/run-containers + description: Mapping AWS IAM roles to ES role + roleNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/sm/v1alpha1/policy.yaml b/examples-generated/sm/v1alpha1/policy.yaml new file mode 100644 index 0000000..ac118b2 --- /dev/null +++ b/examples-generated/sm/v1alpha1/policy.yaml @@ -0,0 +1,70 @@ +apiVersion: sm.opensearch.upbound.io/v1alpha1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: sm/v1alpha1/policy + labels: + testing.upbound.io/example-name: snapshot_to_s3 + name: snapshot-to-s3 +spec: + forProvider: + body: |- + ${jsonencode({ + "enabled" = true + "description" = "My snapshot policy" + + "creation" = { + "schedule" = { + "cron" = { + "expression" = "0 0 * * *" + "timezone" = "UTC" + } + } + + "time_limit" = "1h" + } + + "deletion" = { + "schedule" = { + "cron" = { + "expression" = "0 0 * * *" + "timezone" = "UTC" + } + } + + "condition" = { + "max_age" = "14d" + "max_count" = 400 + "min_count" = 1 + } + + "time_limit" = "1h" + } + + "snapshot_config" = { + "timezone" = "UTC" + "indices" = "*" + "repository" = opensearch_snapshot_repository.repo.name + } + })} + policyName: snapshot_to_s3 + +--- + +apiVersion: snapshot.opensearch.upbound.io/v1alpha1 +kind: Repository +metadata: + annotations: + meta.upbound.io/example-id: sm/v1alpha1/policy + labels: + testing.upbound.io/example-name: repo + name: repo +spec: + forProvider: + name: os-index-backups + settings: + bucket: ${module.s3_snapshot.s3_bucket_id} + region: 
${module.s3_snapshot.s3_bucket_region} + role_arn: ${aws_iam_role.snapshot_create.arn} + server_side_encryption: true + type: s3 diff --git a/examples-generated/snapshot/v1alpha1/repository.yaml b/examples-generated/snapshot/v1alpha1/repository.yaml new file mode 100644 index 0000000..903d967 --- /dev/null +++ b/examples-generated/snapshot/v1alpha1/repository.yaml @@ -0,0 +1,16 @@ +apiVersion: snapshot.opensearch.upbound.io/v1alpha1 +kind: Repository +metadata: + annotations: + meta.upbound.io/example-id: snapshot/v1alpha1/repository + labels: + testing.upbound.io/example-name: repo + name: repo +spec: + forProvider: + name: es-index-backups + settings: + bucket: es-index-backups + region: us-east-1 + role_arn: arn:aws:iam::123456789012:role/MyRole + type: s3 diff --git a/go.mod b/go.mod index 729f3f2..5d06796 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/tagesjump/provider-opensearch go 1.22 require ( + dario.cat/mergo v1.0.0 github.com/crossplane/crossplane-runtime v1.15.0 github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79 github.com/crossplane/upjet v1.2.0 @@ -15,7 +16,6 @@ require ( - dario.cat/mergo v1.0.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect diff --git a/internal/clients/opensearch.go b/internal/clients/opensearch.go index e2ff9c9..ade24e6 100644 --- a/internal/clients/opensearch.go +++ b/internal/clients/opensearch.go @@ -75,6 +75,7 @@ const ( errTrackUsage = "cannot track ProviderConfig usage" errExtractCredentials = "cannot extract credentials" errUnmarshalCredentials = "cannot unmarshal opensearch credentials as JSON" + errNoRequiredFieldURL = "missing required field - url" ) // TerraformSetupBuilder builds Terraform a terraform.SetupFn function which @@ -115,11 +116,17 @@ func TerraformSetupBuilder(version, providerSource,
providerVersion string) terr // set provider configuration ps.Configuration = map[string]any{} + if value, ok := creds[url]; ok { + ps.Configuration[url] = value + } else { + return ps, errors.New(errNoRequiredFieldURL) + } + for _, setting := range []string{ awsAccessKey, awsAssumeRoleArn, awsAssumeRoleExternalId, awsProfile, awsRegion, awsSecretKey, awsSignatureService, awsToken, cacertFile, clientCertPath, clientKeyPath, healthcheck, hostOverride, insecure, opensearchVersion, password, proxy, signAwsRequests, - sniff, token, tokenName, username,versionPingTimeout, + sniff, token, tokenName, username, versionPingTimeout, } { if value, ok := creds[setting]; ok { ps.Configuration[setting] = value diff --git a/internal/controller/anomaly/detection/zz_controller.go b/internal/controller/anomaly/detection/zz_controller.go new file mode 100755 index 0000000..46fb292 --- /dev/null +++ b/internal/controller/anomaly/detection/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package detection + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/anomaly/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Detection managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Detection_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Detection_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Detection_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_anomaly_detection"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Detection + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Detection{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Detection") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Detection_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Detection{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/audit/config/zz_controller.go b/internal/controller/audit/config/zz_controller.go new file mode 100755 index 0000000..a607ef6 --- /dev/null +++ b/internal/controller/audit/config/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package config + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/audit/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Config managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Config_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Config_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Config_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_audit_config"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Config + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Config{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Config") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Config_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Config{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/channel/configuration/zz_controller.go b/internal/controller/channel/configuration/zz_controller.go new file mode 100755 index 0000000..358cb97 --- /dev/null +++ b/internal/controller/channel/configuration/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package configuration + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/channel/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Configuration managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Configuration_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Configuration_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Configuration_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_channel_configuration"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Configuration + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Configuration{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Configuration") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Configuration_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Configuration{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/cluster/settings/zz_controller.go b/internal/controller/cluster/settings/zz_controller.go new file mode 100755 index 0000000..4bd72ac --- /dev/null +++ b/internal/controller/cluster/settings/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package settings + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/cluster/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Settings managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Settings_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Settings_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Settings_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_cluster_settings"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Settings + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Settings{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Settings") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Settings_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Settings{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/component/template/zz_controller.go b/internal/controller/component/template/zz_controller.go new file mode 100755 index 0000000..4bf5fc7 --- /dev/null +++ b/internal/controller/component/template/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package template + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/component/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Template managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Template_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Template_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Template_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_component_template"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Template + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Template{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Template") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Template_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Template{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/composable/indextemplate/zz_controller.go b/internal/controller/composable/indextemplate/zz_controller.go new file mode 100755 index 0000000..6ee0484 --- /dev/null +++ b/internal/controller/composable/indextemplate/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package indextemplate + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/composable/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles IndexTemplate managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.IndexTemplate_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.IndexTemplate_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.IndexTemplate_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_composable_index_template"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.IndexTemplate
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.IndexTemplate{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.IndexTemplate")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.IndexTemplate_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.IndexTemplate{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/dashboard/object/zz_controller.go b/internal/controller/dashboard/object/zz_controller.go
new file mode 100755
index 0000000..0bc7f4b
--- /dev/null
+++ b/internal/controller/dashboard/object/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package object
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/dashboard/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Object managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Object_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Object_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Object_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_dashboard_object"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.Object
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Object{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Object")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Object_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.Object{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/dashboard/tenant/zz_controller.go b/internal/controller/dashboard/tenant/zz_controller.go
new file mode 100755
index 0000000..3b568a1
--- /dev/null
+++ b/internal/controller/dashboard/tenant/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package tenant
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/dashboard/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Tenant managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Tenant_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Tenant_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Tenant_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_dashboard_tenant"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.Tenant
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Tenant{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Tenant")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Tenant_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.Tenant{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/data/stream/zz_controller.go b/internal/controller/data/stream/zz_controller.go
new file mode 100755
index 0000000..1eab87b
--- /dev/null
+++ b/internal/controller/data/stream/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package stream
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/data/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Stream managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Stream_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Stream_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Stream_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_data_stream"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.Stream
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Stream{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Stream")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Stream_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.Stream{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/index/template/zz_controller.go b/internal/controller/index/template/zz_controller.go
new file mode 100755
index 0000000..bf2e977
--- /dev/null
+++ b/internal/controller/index/template/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package template
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/index/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Template managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Template_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Template_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Template_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_index_template"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.Template
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Template{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Template")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Template_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.Template{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/ingest/pipeline/zz_controller.go b/internal/controller/ingest/pipeline/zz_controller.go
new file mode 100755
index 0000000..2293c7d
--- /dev/null
+++ b/internal/controller/ingest/pipeline/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package pipeline
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/ingest/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Pipeline managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Pipeline_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Pipeline_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Pipeline_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_ingest_pipeline"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.Pipeline
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Pipeline{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Pipeline")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Pipeline_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.Pipeline{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/ism/policy/zz_controller.go b/internal/controller/ism/policy/zz_controller.go
new file mode 100755
index 0000000..baa130d
--- /dev/null
+++ b/internal/controller/ism/policy/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package policy
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/ism/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Policy managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Policy_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Policy_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Policy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_ism_policy"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.Policy
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Policy{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Policy")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Policy_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.Policy{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/ism/policymapping/zz_controller.go b/internal/controller/ism/policymapping/zz_controller.go
new file mode 100755
index 0000000..b564055
--- /dev/null
+++ b/internal/controller/ism/policymapping/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package policymapping
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/ism/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles PolicyMapping managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.PolicyMapping_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.PolicyMapping_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PolicyMapping_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_ism_policy_mapping"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.PolicyMapping
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.PolicyMapping{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.PolicyMapping")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.PolicyMapping_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.PolicyMapping{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/opensearch/index/zz_controller.go b/internal/controller/opensearch/index/zz_controller.go
new file mode 100755
index 0000000..969c6ae
--- /dev/null
+++ b/internal/controller/opensearch/index/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package index
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Index managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Index_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Index_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Index_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_index"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.Index
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Index{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Index")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Index_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.Index{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/opensearch/monitor/zz_controller.go b/internal/controller/opensearch/monitor/zz_controller.go
new file mode 100755
index 0000000..dd88d94
--- /dev/null
+++ b/internal/controller/opensearch/monitor/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package monitor
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Monitor managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
+	name := managed.ControllerName(v1alpha1.Monitor_GroupVersionKind.String())
+	var initializers managed.InitializerChain // empty chain: no initializers are registered for this kind
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} // connection details are always published to the connection Secret
+	if o.SecretStoreConfigGVK != nil { // an external secret store publisher is added only when configured
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
+	}
+	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Monitor_GroupVersionKind)))
+	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Monitor_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) // async Terraform operation callbacks
+	opts := []managed.ReconcilerOption{
+		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_monitor"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
+			tjcontroller.WithCallbackProvider(ac),
+		)),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), // finalizer also removes the Terraform workspace
+		managed.WithTimeout(3 * time.Minute),
+		managed.WithInitializers(initializers),
+		managed.WithConnectionPublishers(cps...),
+		managed.WithPollInterval(o.PollInterval),
+	}
+	if o.PollJitter != 0 { // optional jitter applied on top of the poll interval
+		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
+	}
+	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
+		opts = append(opts, managed.WithManagementPolicies())
+	}
+
+	// register webhooks for the kind v1alpha1.Monitor
+	// if they're enabled.
+	if o.StartWebhooks {
+		if err := ctrl.NewWebhookManagedBy(mgr).
+			For(&v1alpha1.Monitor{}).
+			Complete(); err != nil {
+			return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Monitor")
+		}
+	}
+
+	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Monitor_GroupVersionKind), opts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(xpresource.DesiredStateChanged()). // skip events that do not change desired state
+		Watches(&v1alpha1.Monitor{}, eventHandler).
+		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
+}
diff --git a/internal/controller/opensearch/role/zz_controller.go b/internal/controller/opensearch/role/zz_controller.go
new file mode 100755
index 0000000..23382d8
--- /dev/null
+++ b/internal/controller/opensearch/role/zz_controller.go
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: 2023 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package role
+
+import (
+	"time"
+
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	tjcontroller "github.com/crossplane/upjet/pkg/controller"
+	"github.com/crossplane/upjet/pkg/controller/handler"
+	"github.com/crossplane/upjet/pkg/terraform"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	v1alpha1 "github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1"
+	features "github.com/tagesjump/provider-opensearch/internal/features"
+)
+
+// Setup adds a controller that reconciles Role managed resources.
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Role_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Role_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Role_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_role"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Role + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Role{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Role") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Role_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Role{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/opensearch/script/zz_controller.go b/internal/controller/opensearch/script/zz_controller.go new file mode 100755 index 0000000..c7281f4 --- /dev/null +++ b/internal/controller/opensearch/script/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package script + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Script managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Script_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Script_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Script_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_script"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Script + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Script{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Script") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Script_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Script{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/opensearch/user/zz_controller.go b/internal/controller/opensearch/user/zz_controller.go new file mode 100755 index 0000000..f97def0 --- /dev/null +++ b/internal/controller/opensearch/user/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package user + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/opensearch/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles User managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.User_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.User_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.User_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_user"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.User + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.User{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.User") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.User_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.User{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/roles/mapping/zz_controller.go b/internal/controller/roles/mapping/zz_controller.go new file mode 100755 index 0000000..0cf8940 --- /dev/null +++ b/internal/controller/roles/mapping/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package mapping + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/roles/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Mapping managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Mapping_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Mapping_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Mapping_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_roles_mapping"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Mapping + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Mapping{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Mapping") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Mapping_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Mapping{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/sm/policy/zz_controller.go b/internal/controller/sm/policy/zz_controller.go new file mode 100755 index 0000000..74a1a54 --- /dev/null +++ b/internal/controller/sm/policy/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package policy + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/sm/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Policy managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Policy_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Policy_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Policy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_sm_policy"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Policy + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Policy{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Policy") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Policy_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Policy{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/snapshot/repository/zz_controller.go b/internal/controller/snapshot/repository/zz_controller.go new file mode 100755 index 0000000..9d7f82b --- /dev/null +++ b/internal/controller/snapshot/repository/zz_controller.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package repository + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-opensearch/apis/snapshot/v1alpha1" + features "github.com/tagesjump/provider-opensearch/internal/features" +) + +// Setup adds a controller that reconciles Repository managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Repository_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Repository_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Repository_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["opensearch_snapshot_repository"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + + // register webhooks for the kind v1alpha1.Repository + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Repository{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Repository") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Repository_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Repository{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index 4114dae..da9a3bc 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -9,14 +9,56 @@ import ( "github.com/crossplane/upjet/pkg/controller" + detection "github.com/tagesjump/provider-opensearch/internal/controller/anomaly/detection" + config "github.com/tagesjump/provider-opensearch/internal/controller/audit/config" + configuration "github.com/tagesjump/provider-opensearch/internal/controller/channel/configuration" + settings "github.com/tagesjump/provider-opensearch/internal/controller/cluster/settings" + template "github.com/tagesjump/provider-opensearch/internal/controller/component/template" + indextemplate "github.com/tagesjump/provider-opensearch/internal/controller/composable/indextemplate" + object "github.com/tagesjump/provider-opensearch/internal/controller/dashboard/object" + tenant "github.com/tagesjump/provider-opensearch/internal/controller/dashboard/tenant" + stream "github.com/tagesjump/provider-opensearch/internal/controller/data/stream" + templateindex "github.com/tagesjump/provider-opensearch/internal/controller/index/template" + pipeline "github.com/tagesjump/provider-opensearch/internal/controller/ingest/pipeline" + policy "github.com/tagesjump/provider-opensearch/internal/controller/ism/policy" + policymapping "github.com/tagesjump/provider-opensearch/internal/controller/ism/policymapping" + index 
"github.com/tagesjump/provider-opensearch/internal/controller/opensearch/index" + monitor "github.com/tagesjump/provider-opensearch/internal/controller/opensearch/monitor" + role "github.com/tagesjump/provider-opensearch/internal/controller/opensearch/role" + script "github.com/tagesjump/provider-opensearch/internal/controller/opensearch/script" + user "github.com/tagesjump/provider-opensearch/internal/controller/opensearch/user" providerconfig "github.com/tagesjump/provider-opensearch/internal/controller/providerconfig" + mapping "github.com/tagesjump/provider-opensearch/internal/controller/roles/mapping" + policysm "github.com/tagesjump/provider-opensearch/internal/controller/sm/policy" + repository "github.com/tagesjump/provider-opensearch/internal/controller/snapshot/repository" ) // Setup creates all controllers with the supplied logger and adds them to // the supplied manager. func Setup(mgr ctrl.Manager, o controller.Options) error { for _, setup := range []func(ctrl.Manager, controller.Options) error{ + detection.Setup, + config.Setup, + configuration.Setup, + settings.Setup, + template.Setup, + indextemplate.Setup, + object.Setup, + tenant.Setup, + stream.Setup, + templateindex.Setup, + pipeline.Setup, + policy.Setup, + policymapping.Setup, + index.Setup, + monitor.Setup, + role.Setup, + script.Setup, + user.Setup, providerconfig.Setup, + mapping.Setup, + policysm.Setup, + repository.Setup, } { if err := setup(mgr, o); err != nil { return err diff --git a/package/crds/anomaly.opensearch.upbound.io_detections.yaml b/package/crds/anomaly.opensearch.upbound.io_detections.yaml new file mode 100644 index 0000000..e1c6652 --- /dev/null +++ b/package/crds/anomaly.opensearch.upbound.io_detections.yaml @@ -0,0 +1,334 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: detections.anomaly.opensearch.upbound.io +spec: + group: anomaly.opensearch.upbound.io + 
names: + categories: + - crossplane + - managed + - opensearch + kind: Detection + listKind: DetectionList + plural: detections + singular: detection + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Detection is the Schema for the Detections API. Provides an OpenSearch + anonaly detection. Please refer to the OpenSearch anomaly detection documentation + for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DetectionSpec defines the desired state of Detection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. 
Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The anomaly detection document + The anomaly detection document + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + body: + description: |- + (String) The anomaly detection document + The anomaly detection document + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + status: + description: DetectionStatus defines the observed state of Detection. + properties: + atProvider: + properties: + body: + description: |- + (String) The anomaly detection document + The anomaly detection document + type: string + id: + description: (String) The ID of this resource. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/audit.opensearch.upbound.io_configs.yaml b/package/crds/audit.opensearch.upbound.io_configs.yaml new file mode 100644 index 0000000..d8819f2 --- /dev/null +++ b/package/crds/audit.opensearch.upbound.io_configs.yaml @@ -0,0 +1,641 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: configs.audit.opensearch.upbound.io +spec: + group: audit.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Config + listKind: ConfigList + plural: configs + singular: config + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Config is the Schema for the Configs API. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigSpec defines the desired state of Config + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + audit: + description: '(Block Set, Max: 1) (see below for nested schema)' + items: + properties: + disabledRestCategories: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + disabledTransportCategories: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + enableRest: + description: (Boolean) + type: boolean + enableTransport: + description: (Boolean) + type: boolean + excludeSensitiveHeaders: + description: (Boolean) + type: boolean + ignoreRequests: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + ignoreUsers: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + logRequestBody: + description: (Boolean) + type: boolean + resolveBulkRequests: + description: (Boolean) + type: boolean + resolveIndices: + description: (Boolean) + type: boolean + type: object + type: array + compliance: + description: '(Block Set, Max: 1) (see below for nested schema)' + items: + properties: + enabled: + description: (Boolean) + type: boolean + externalConfig: + description: (Boolean) + type: boolean + internalConfig: + description: (Boolean) + type: boolean + readIgnoreUsers: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + readMetadataOnly: + description: (Boolean) + type: boolean + readWatchedField: + description: (Block Set) (see below for nested schema) + items: + properties: + fields: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + index: + description: (String) + type: string + type: object + type: array + writeIgnoreUsers: 
+ description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + writeLogDiffs: + description: (Boolean) + type: boolean + writeMetadataOnly: + description: (Boolean) + type: boolean + writeWatchedIndices: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + enabled: + description: (Boolean) + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + audit: + description: '(Block Set, Max: 1) (see below for nested schema)' + items: + properties: + disabledRestCategories: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + disabledTransportCategories: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + enableRest: + description: (Boolean) + type: boolean + enableTransport: + description: (Boolean) + type: boolean + excludeSensitiveHeaders: + description: (Boolean) + type: boolean + ignoreRequests: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + ignoreUsers: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + logRequestBody: + description: (Boolean) + type: boolean + resolveBulkRequests: + description: (Boolean) + type: boolean + resolveIndices: + description: (Boolean) + type: boolean + type: object + type: array + compliance: + description: '(Block Set, Max: 1) (see below for nested schema)' + items: + properties: + enabled: + description: (Boolean) + type: boolean + externalConfig: + description: (Boolean) + type: boolean + internalConfig: + description: (Boolean) + type: boolean + readIgnoreUsers: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + readMetadataOnly: + description: (Boolean) + type: boolean + readWatchedField: + description: (Block Set) (see below for nested schema) + items: + properties: + fields: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + index: + description: (String) + type: string + type: object + type: array + writeIgnoreUsers: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + writeLogDiffs: + description: (Boolean) + type: boolean + writeMetadataOnly: + description: (Boolean) + type: boolean + writeWatchedIndices: + 
description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + enabled: + description: (Boolean) + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.enabled is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.enabled) + || (has(self.initProvider) && has(self.initProvider.enabled))' + status: + description: ConfigStatus defines the observed state of Config. + properties: + atProvider: + properties: + audit: + description: '(Block Set, Max: 1) (see below for nested schema)' + items: + properties: + disabledRestCategories: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + disabledTransportCategories: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + enableRest: + description: (Boolean) + type: boolean + enableTransport: + description: (Boolean) + type: boolean + excludeSensitiveHeaders: + description: (Boolean) + type: boolean + ignoreRequests: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + ignoreUsers: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + logRequestBody: + description: (Boolean) + type: boolean + resolveBulkRequests: + description: (Boolean) + type: boolean + resolveIndices: + description: (Boolean) + type: boolean + type: object + type: array + compliance: + description: '(Block Set, Max: 1) (see below for nested schema)' + items: + properties: + enabled: + description: (Boolean) + type: boolean + externalConfig: + description: (Boolean) + type: boolean + internalConfig: + description: (Boolean) + type: boolean + readIgnoreUsers: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + readMetadataOnly: + description: (Boolean) + type: boolean + readWatchedField: + 
description: (Block Set) (see below for nested schema) + items: + properties: + fields: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + index: + description: (String) + type: string + type: object + type: array + writeIgnoreUsers: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + writeLogDiffs: + description: (Boolean) + type: boolean + writeMetadataOnly: + description: (Boolean) + type: boolean + writeWatchedIndices: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + enabled: + description: (Boolean) + type: boolean + id: + description: (String) The ID of this resource. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/channel.opensearch.upbound.io_configurations.yaml b/package/crds/channel.opensearch.upbound.io_configurations.yaml new file mode 100644 index 0000000..03b15f8 --- /dev/null +++ b/package/crds/channel.opensearch.upbound.io_configurations.yaml @@ -0,0 +1,325 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: configurations.channel.opensearch.upbound.io +spec: + group: channel.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Configuration + listKind: ConfigurationList + plural: configurations + singular: configuration + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Configuration is the Schema for the Configurations API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigurationSpec defines the desired state of Configuration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: The channel configuration document + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + body: + description: The channel configuration document + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + status: + description: ConfigurationStatus defines the observed state of Configuration. + properties: + atProvider: + properties: + body: + description: The channel configuration document + type: string + id: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/cluster.opensearch.upbound.io_settings.yaml b/package/crds/cluster.opensearch.upbound.io_settings.yaml new file mode 100644 index 0000000..8b3937e --- /dev/null +++ b/package/crds/cluster.opensearch.upbound.io_settings.yaml @@ -0,0 +1,898 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: settings.cluster.opensearch.upbound.io +spec: + group: cluster.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Settings + listKind: SettingsList + plural: settings + singular: settings + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Settings is the Schema for the Settingss API. Manages a cluster's + (persistent) settings. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SettingsSpec defines the desired state of Settings + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + actionAutoCreateIndex: + description: |- + (String) Whether to automatically create an index if it doesn’t already exist and apply any configured index template + Whether to automatically create an index if it doesn’t already exist and apply any configured index template + type: string + actionDestructiveRequiresName: + description: |- + (Boolean) When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + type: boolean + clusterBlocksReadOnly: + description: |- + (Boolean) Make the whole cluster read only and metadata is not allowed to be modified + Make the whole cluster read only and metadata is not allowed to be modified + type: boolean + clusterBlocksReadOnlyAllowDelete: + description: |- + (Boolean) 
Make the whole cluster read only, but allows to delete indices to free up resources + Make the whole cluster read only, but allows to delete indices to free up resources + type: boolean + clusterIndicesCloseEnable: + description: |- + (Boolean) If false, you cannot close open indices + If false, you cannot close open indices + type: boolean + clusterInfoUpdateInterval: + description: |- + (String) A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + type: string + clusterMaxShardsPerNode: + description: |- + frozen data nodes; shards for closed indices do not count toward this limit + The total number of primary and replica shards for the cluster, this number is multiplied by the number of non-frozen data nodes; shards for closed indices do not count toward this limit + type: number + clusterMaxShardsPerNodeFrozen: + description: |- + (Number) The total number of primary and replica frozen shards, for the cluster; Shards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + The total number of primary and replica frozen shards, for the cluster; Shards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. 
+ type: number + clusterNoMasterBlock: + description: |- + (String) Specifies which operations are rejected when there is no active master in a cluster (all, write) + Specifies which operations are rejected when there is no active master in a cluster (all, write) + type: string + clusterPersistentTasksAllocationEnable: + description: |- + (String) Whether allocation for persistent tasks is active (all, none) + Whether allocation for persistent tasks is active (all, none) + type: string + clusterPersistentTasksAllocationRecheckInterval: + description: |- + (String) A time string controlling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + A time string controlling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + type: string + clusterRoutingAllocationAllowRebalance: + description: |- + (String) Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + type: string + clusterRoutingAllocationAwarenessAttributes: + description: |- + (String) Use custom node attributes to take hardware configuration into account when allocating shards + Use custom node attributes to take hardware configuration into account when allocating shards + type: string + clusterRoutingAllocationBalanceIndex: + description: |- + (Number) Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + type: number + clusterRoutingAllocationBalanceShard: + description: |- + (Number) Weight factor for the total number of shards allocated on a node, increasing this raises the tendency 
to equalize the number of shards across all nodes + Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + type: number + clusterRoutingAllocationBalanceThreshold: + description: |- + (Number) Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + type: number + clusterRoutingAllocationClusterConcurrentRebalance: + description: |- + (Number) How many concurrent shard rebalances are allowed cluster wide + How many concurrent shard rebalances are allowed cluster wide + type: number + clusterRoutingAllocationDiskIncludeRelocations: + description: |- + (Boolean) Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + type: boolean + clusterRoutingAllocationDiskThresholdEnabled: + description: |- + (Boolean) Whether the disk allocation decider is active + Whether the disk allocation decider is active + type: boolean + clusterRoutingAllocationDiskWatermarkHigh: + description: |- + (String) Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + type: string + clusterRoutingAllocationDiskWatermarkLow: + description: |- + (String) Allocator will not allocate shards to nodes that have more than this percentage disk used + Allocator will not allocate shards to nodes that have more than this percentage disk used + type: 
string + clusterRoutingAllocationEnable: + description: |- + (String) Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + type: string + clusterRoutingAllocationNodeConcurrentIncomingRecoveries: + description: |- + (Number) How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + type: number + clusterRoutingAllocationNodeConcurrentOutgoingRecoveries: + description: |- + (Number) How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + type: number + clusterRoutingAllocationNodeConcurrentRecoveries: + description: |- + (Number) A shortcut to set both incoming and outgoing recoveries + A shortcut to set both incoming and outgoing recoveries + type: number + clusterRoutingAllocationNodeInitialPrimariesRecoveries: + description: |- + (Number) Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + type: number + clusterRoutingAllocationSameShardHost: + description: |- + (Boolean) Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + type: boolean + clusterRoutingAllocationTotalShardsPerNode: + description: |- + (Number) Maximum number of primary and replica shards allocated 
to each node + Maximum number of primary and replica shards allocated to each node + type: number + clusterRoutingRebalanceEnable: + description: |- + (String) Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + type: string + indicesBreakerFielddataLimit: + description: |- + (String) The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + type: string + indicesBreakerFielddataOverhead: + description: |- + (Number) A constant that all field data estimations are multiplied by + A constant that all field data estimations are multiplied by + type: number + indicesBreakerRequestLimit: + description: |- + request data structures (e.g. calculating aggregations) are prevented from exceeding + The percentage of memory above which per-request data structures (e.g. 
calculating aggregations) are prevented from exceeding + type: string + indicesBreakerRequestOverhead: + description: |- + (Number) A constant that all request estimations are multiplied by + A constant that all request estimations are multiplied by + type: number + indicesBreakerTotalLimit: + description: |- + (String) The percentage of total amount of memory that can be used across all breakers + The percentage of total amount of memory that can be used across all breakers + type: string + indicesRecoveryMaxBytesPerSec: + description: |- + (String) Maximum total inbound and outbound recovery traffic for each node, in mb + Maximum total inbound and outbound recovery traffic for each node, in mb + type: string + networkBreakerInflightRequestsLimit: + description: |- + (String) The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + type: string + networkBreakerInflightRequestsOverhead: + description: |- + (Number) A constant that all in flight requests estimations are multiplied by + A constant that all in flight requests estimations are multiplied by + type: number + scriptMaxCompilationsRate: + description: |- + (String) Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + type: string + searchDefaultSearchTimeout: + description: |- + wide default timeout for all search requests + A time string setting a cluster-wide default timeout for all search requests + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + actionAutoCreateIndex: + description: |- + (String) Whether to automatically create an index if it doesn’t already exist and apply any configured index template + Whether to automatically create an index if it doesn’t already exist and apply any configured index template + type: string + actionDestructiveRequiresName: + description: |- + (Boolean) When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + type: boolean + clusterBlocksReadOnly: + description: |- + (Boolean) Make the whole cluster read only and metadata is not allowed to be modified + Make the whole cluster read only and metadata is not allowed to be modified + type: boolean + clusterBlocksReadOnlyAllowDelete: + description: |- + (Boolean) Make the whole cluster read only, but allows to delete indices to free up resources + Make the whole cluster read only, but allows to delete indices to free up resources + type: boolean + clusterIndicesCloseEnable: + description: |- + (Boolean) If false, you cannot close open indices + If false, you cannot close open indices + type: boolean + clusterInfoUpdateInterval: + description: |- + (String) A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + A 
time string controlling how often OpenSearch should check on disk usage for each node in the cluster + type: string + clusterMaxShardsPerNode: + description: |- + frozen data nodes; shards for closed indices do not count toward this limit + The total number of primary and replica shards for the cluster, this number is multiplied by the number of non-frozen data nodes; shards for closed indices do not count toward this limit + type: number + clusterMaxShardsPerNodeFrozen: + description: |- + (Number) The total number of primary and replica frozen shards, for the cluster; Shards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + The total number of primary and replica frozen shards, for the cluster; Shards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + type: number + clusterNoMasterBlock: + description: |- + (String) Specifies which operations are rejected when there is no active master in a cluster (all, write) + Specifies which operations are rejected when there is no active master in a cluster (all, write) + type: string + clusterPersistentTasksAllocationEnable: + description: |- + (String) Whether allocation for persistent tasks is active (all, none) + Whether allocation for persistent tasks is active (all, none) + type: string + clusterPersistentTasksAllocationRecheckInterval: + description: |- + (String) A time string controlling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + A time string controlling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + type: string + clusterRoutingAllocationAllowRebalance: + description: |- + (String) Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + type: string + 
clusterRoutingAllocationAwarenessAttributes: + description: |- + (String) Use custom node attributes to take hardware configuration into account when allocating shards + Use custom node attributes to take hardware configuration into account when allocating shards + type: string + clusterRoutingAllocationBalanceIndex: + description: |- + (Number) Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + type: number + clusterRoutingAllocationBalanceShard: + description: |- + (Number) Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + type: number + clusterRoutingAllocationBalanceThreshold: + description: |- + (Number) Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + type: number + clusterRoutingAllocationClusterConcurrentRebalance: + description: |- + (Number) How many concurrent shard rebalances are allowed cluster wide + How many concurrent shard rebalances are allowed cluster wide + type: number + clusterRoutingAllocationDiskIncludeRelocations: + description: |- + (Boolean) Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + Whether the allocator will take into account shards 
that are currently being relocated to the target node when computing a node’s disk usage + type: boolean + clusterRoutingAllocationDiskThresholdEnabled: + description: |- + (Boolean) Whether the disk allocation decider is active + Whether the disk allocation decider is active + type: boolean + clusterRoutingAllocationDiskWatermarkHigh: + description: |- + (String) Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + type: string + clusterRoutingAllocationDiskWatermarkLow: + description: |- + (String) Allocator will not allocate shards to nodes that have more than this percentage disk used + Allocator will not allocate shards to nodes that have more than this percentage disk used + type: string + clusterRoutingAllocationEnable: + description: |- + (String) Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + type: string + clusterRoutingAllocationNodeConcurrentIncomingRecoveries: + description: |- + (Number) How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + type: number + clusterRoutingAllocationNodeConcurrentOutgoingRecoveries: + description: |- + (Number) How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + type: number + clusterRoutingAllocationNodeConcurrentRecoveries: + description: |- + (Number) A shortcut to set both incoming and outgoing 
recoveries + A shortcut to set both incoming and outgoing recoveries + type: number + clusterRoutingAllocationNodeInitialPrimariesRecoveries: + description: |- + (Number) Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + type: number + clusterRoutingAllocationSameShardHost: + description: |- + (Boolean) Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + type: boolean + clusterRoutingAllocationTotalShardsPerNode: + description: |- + (Number) Maximum number of primary and replica shards allocated to each node + Maximum number of primary and replica shards allocated to each node + type: number + clusterRoutingRebalanceEnable: + description: |- + (String) Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + type: string + indicesBreakerFielddataLimit: + description: |- + (String) The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + type: string + indicesBreakerFielddataOverhead: + description: |- + (Number) A constant that all field data estimations are multiplied by + A constant that all field data estimations are multiplied by + type: number + indicesBreakerRequestLimit: + description: |- + request data structures (e.g. calculating aggregations) are prevented from exceeding + The percentage of memory above which per-request data structures (e.g. 
calculating aggregations) are prevented from exceeding + type: string + indicesBreakerRequestOverhead: + description: |- + (Number) A constant that all request estimations are multiplied by + A constant that all request estimations are multiplied by + type: number + indicesBreakerTotalLimit: + description: |- + (String) The percentage of total amount of memory that can be used across all breakers + The percentage of total amount of memory that can be used across all breakers + type: string + indicesRecoveryMaxBytesPerSec: + description: |- + (String) Maximum total inbound and outbound recovery traffic for each node, in mb + Maximum total inbound and outbound recovery traffic for each node, in mb + type: string + networkBreakerInflightRequestsLimit: + description: |- + (String) The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + type: string + networkBreakerInflightRequestsOverhead: + description: |- + (Number) A constant that all in flight requests estimations are multiplied by + A constant that all in flight requests estimations are multiplied by + type: number + scriptMaxCompilationsRate: + description: |- + (String) Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + type: string + searchDefaultSearchTimeout: + description: |- + wide default timeout for all search requests + A time string setting a cluster-wide default timeout for all search requests + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SettingsStatus defines the observed state of Settings. 
+ properties: + atProvider: + properties: + actionAutoCreateIndex: + description: |- + (String) Whether to automatically create an index if it doesn’t already exist and apply any configured index template + Whether to automatically create an index if it doesn’t already exist and apply any configured index template + type: string + actionDestructiveRequiresName: + description: |- + (Boolean) When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + When set to true, you must specify the index name to delete an index and it is not possible to delete all indices with _all or use wildcards + type: boolean + clusterBlocksReadOnly: + description: |- + (Boolean) Make the whole cluster read only and metadata is not allowed to be modified + Make the whole cluster read only and metadata is not allowed to be modified + type: boolean + clusterBlocksReadOnlyAllowDelete: + description: |- + (Boolean) Make the whole cluster read only, but allows to delete indices to free up resources + Make the whole cluster read only, but allows to delete indices to free up resources + type: boolean + clusterIndicesCloseEnable: + description: |- + (Boolean) If false, you cannot close open indices + If false, you cannot close open indices + type: boolean + clusterInfoUpdateInterval: + description: |- + (String) A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + A time string controlling how often OpenSearch should check on disk usage for each node in the cluster + type: string + clusterMaxShardsPerNode: + description: |- + frozen data nodes; shards for closed indices do not count toward this limit + The total number of primary and replica shards for the cluster, this number is multiplied by the number of non-frozen data nodes; shards for closed indices do not count toward this limit + type: number + clusterMaxShardsPerNodeFrozen: + description: |- + (Number) 
The total number of primary and replica frozen shards, for the cluster; Shards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + The total number of primary and replica frozen shards, for the cluster; Shards for closed indices do not count toward this limit, a cluster with no frozen data nodes is unlimited. + type: number + clusterNoMasterBlock: + description: |- + (String) Specifies which operations are rejected when there is no active master in a cluster (all, write) + Specifies which operations are rejected when there is no active master in a cluster (all, write) + type: string + clusterPersistentTasksAllocationEnable: + description: |- + (String) Whether allocation for persistent tasks is active (all, none) + Whether allocation for persistent tasks is active (all, none) + type: string + clusterPersistentTasksAllocationRecheckInterval: + description: |- + (String) A time string controlling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + A time string controlling how often assignment checks are performed to react to whether persistent tasks can be assigned to nodes + type: string + clusterRoutingAllocationAllowRebalance: + description: |- + (String) Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + Specify when shard rebalancing is allowed (always, indices_primaries_active, indices_all_active) + type: string + clusterRoutingAllocationAwarenessAttributes: + description: |- + (String) Use custom node attributes to take hardware configuration into account when allocating shards + Use custom node attributes to take hardware configuration into account when allocating shards + type: string + clusterRoutingAllocationBalanceIndex: + description: |- + (Number) Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all 
nodes + Weight factor for the number of shards per index allocated on a node, increasing this raises the tendency to equalize the number of shards per index across all nodes + type: number + clusterRoutingAllocationBalanceShard: + description: |- + (Number) Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + Weight factor for the total number of shards allocated on a node, increasing this raises the tendency to equalize the number of shards across all nodes + type: number + clusterRoutingAllocationBalanceThreshold: + description: |- + (Number) Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + Minimal optimization value of operations that should be performed, raising this will cause the cluster to be less aggressive about optimizing the shard balance + type: number + clusterRoutingAllocationClusterConcurrentRebalance: + description: |- + (Number) How many concurrent shard rebalances are allowed cluster wide + How many concurrent shard rebalances are allowed cluster wide + type: number + clusterRoutingAllocationDiskIncludeRelocations: + description: |- + (Boolean) Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + Whether the allocator will take into account shards that are currently being relocated to the target node when computing a node’s disk usage + type: boolean + clusterRoutingAllocationDiskThresholdEnabled: + description: |- + (Boolean) Whether the disk allocation decider is active + Whether the disk allocation decider is active + type: boolean + clusterRoutingAllocationDiskWatermarkHigh: + description: |- + (String) Allocator will attempt to relocate shards away from a node whose disk usage is above this percentage disk used + Allocator will attempt to 
relocate shards away from a node whose disk usage is above this percentage disk used + type: string + clusterRoutingAllocationDiskWatermarkLow: + description: |- + (String) Allocator will not allocate shards to nodes that have more than this percentage disk used + Allocator will not allocate shards to nodes that have more than this percentage disk used + type: string + clusterRoutingAllocationEnable: + description: |- + (String) Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + Enable or disable allocation for specific kinds of shards (all, primaries, new_primaries, none) + type: string + clusterRoutingAllocationNodeConcurrentIncomingRecoveries: + description: |- + (Number) How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + How many incoming recoveries where the target shard (likely the replica unless a shard is relocating) are allocated on the node + type: number + clusterRoutingAllocationNodeConcurrentOutgoingRecoveries: + description: |- + (Number) How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + How many outgoing recoveries where the source shard (likely the primary unless a shard is relocating) are allocated on the node + type: number + clusterRoutingAllocationNodeConcurrentRecoveries: + description: |- + (Number) A shortcut to set both incoming and outgoing recoveries + A shortcut to set both incoming and outgoing recoveries + type: number + clusterRoutingAllocationNodeInitialPrimariesRecoveries: + description: |- + (Number) Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + Set a (usually) higher rate for primary recovery on node restart (usually from disk, so fast) + type: number + clusterRoutingAllocationSameShardHost: + description: |- + (Boolean) Perform a check to prevent allocation of multiple instances 
of the same shard on a single host, if multiple nodes are started on the host + Perform a check to prevent allocation of multiple instances of the same shard on a single host, if multiple nodes are started on the host + type: boolean + clusterRoutingAllocationTotalShardsPerNode: + description: |- + (Number) Maximum number of primary and replica shards allocated to each node + Maximum number of primary and replica shards allocated to each node + type: number + clusterRoutingRebalanceEnable: + description: |- + (String) Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + Allow rebalancing for specific kinds of shards (all, primaries, replicas, none) + type: string + id: + description: (String) The ID of this resource. + type: string + indicesBreakerFielddataLimit: + description: |- + (String) The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + The percentage of memory above which if loading a field into the field data cache would cause the cache to exceed this limit, an error is returned + type: string + indicesBreakerFielddataOverhead: + description: |- + (Number) A constant that all field data estimations are multiplied by + A constant that all field data estimations are multiplied by + type: number + indicesBreakerRequestLimit: + description: |- + (String) The percentage of memory above which per-request data structures (e.g. calculating aggregations) are prevented from exceeding + The percentage of memory above which per-request data structures (e.g. 
calculating aggregations) are prevented from exceeding + type: string + indicesBreakerRequestOverhead: + description: |- + (Number) A constant that all request estimations are multiplied by + A constant that all request estimations are multiplied by + type: number + indicesBreakerTotalLimit: + description: |- + (String) The percentage of total amount of memory that can be used across all breakers + The percentage of total amount of memory that can be used across all breakers + type: string + indicesRecoveryMaxBytesPerSec: + description: |- + (String) Maximum total inbound and outbound recovery traffic for each node, in mb + Maximum total inbound and outbound recovery traffic for each node, in mb + type: string + networkBreakerInflightRequestsLimit: + description: |- + (String) The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + The percentage limit of memory usage on a node of all currently active incoming requests on transport or HTTP level + type: string + networkBreakerInflightRequestsOverhead: + description: |- + (Number) A constant that all in flight requests estimations are multiplied by + A constant that all in flight requests estimations are multiplied by + type: number + scriptMaxCompilationsRate: + description: |- + (String) Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled, expressed as compilations divided by a time string + type: string + searchDefaultSearchTimeout: + description: |- + (String) A time string setting a cluster-wide default timeout for all search requests + A time string setting a cluster-wide default timeout for all search requests + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/component.opensearch.upbound.io_templates.yaml b/package/crds/component.opensearch.upbound.io_templates.yaml new file mode 100644 index 0000000..9ad9547 --- /dev/null +++ b/package/crds/component.opensearch.upbound.io_templates.yaml @@ -0,0 +1,355 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: templates.component.opensearch.upbound.io +spec: + group: component.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Template + listKind: TemplateList + plural: templates + singular: template + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string 
+ - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Template is the Schema for the Templates API. Component templates + are building blocks for constructing index templates that specify index + mappings, settings, and aliases. You cannot directly apply a component template + to a data stream or index. To be applied, a component template must be included + in an index template’s composed_of list. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TemplateSpec defines the desired state of Template + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The JSON body of the template. + The JSON body of the template. + type: string + name: + description: |- + (String) Name of the component template to create. + Name of the component template to create. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + body: + description: |- + (String) The JSON body of the template. + The JSON body of the template. + type: string + name: + description: |- + (String) Name of the component template to create. + Name of the component template to create. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: TemplateStatus defines the observed state of Template. + properties: + atProvider: + properties: + body: + description: |- + (String) The JSON body of the template. + The JSON body of the template. + type: string + id: + description: (String) The ID of this resource. + type: string + name: + description: |- + (String) Name of the component template to create. + Name of the component template to create. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/composable.opensearch.upbound.io_indextemplates.yaml b/package/crds/composable.opensearch.upbound.io_indextemplates.yaml new file mode 100644 index 0000000..f16d4fe --- /dev/null +++ b/package/crds/composable.opensearch.upbound.io_indextemplates.yaml @@ -0,0 +1,355 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: indextemplates.composable.opensearch.upbound.io +spec: + group: composable.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: IndexTemplate + listKind: IndexTemplateList + plural: indextemplates + singular: indextemplate + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: IndexTemplate is the Schema for the IndexTemplates API. Provides + an Composable index template resource. This resource uses the /_index_template + endpoint of the API that is available since version 2.0.0. Use opensearch_index_template + if you are using older versions or if you want to keep using legacy Index + Templates. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IndexTemplateSpec defines the desired state of IndexTemplate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The JSON body of the index template. + The JSON body of the index template. + type: string + name: + description: |- + (String) The name of the index template. + The name of the index template. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + body: + description: |- + (String) The JSON body of the index template. + The JSON body of the index template. + type: string + name: + description: |- + (String) The name of the index template. + The name of the index template. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: IndexTemplateStatus defines the observed state of IndexTemplate. + properties: + atProvider: + properties: + body: + description: |- + (String) The JSON body of the index template. + The JSON body of the index template. + type: string + id: + description: (String) The ID of this resource. + type: string + name: + description: |- + (String) The name of the index template. + The name of the index template. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dashboard.opensearch.upbound.io_objects.yaml b/package/crds/dashboard.opensearch.upbound.io_objects.yaml new file mode 100644 index 0000000..2af6c74 --- /dev/null +++ b/package/crds/dashboard.opensearch.upbound.io_objects.yaml @@ -0,0 +1,352 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: objects.dashboard.opensearch.upbound.io +spec: + group: dashboard.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Object + listKind: ObjectList + plural: objects + singular: object + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Object is the Schema for the Objects 
API. Provides an OpenSearch + Dashboards object resource. This resource interacts directly with the underlying + OpenSearch index backing Dashboards, so the format must match what Dashboards + the version of Dashboards is expecting. Dashboards with older versions - + directly pulling the JSON from a Dashboards index of the same version of + OpenSearch targeted by the provider is a workaround. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectSpec defines the desired state of Object + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The JSON body of the dashboard object. 
+ The JSON body of the dashboard object. + type: string + index: + description: |- + (String) The name of the index where dashboard data is stored. + The name of the index where dashboard data is stored. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + body: + description: |- + (String) The JSON body of the dashboard object. + The JSON body of the dashboard object. + type: string + index: + description: |- + (String) The name of the index where dashboard data is stored. + The name of the index where dashboard data is stored. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + status: + description: ObjectStatus defines the observed state of Object. + properties: + atProvider: + properties: + body: + description: |- + (String) The JSON body of the dashboard object. + The JSON body of the dashboard object. + type: string + id: + description: (String) The ID of this resource. + type: string + index: + description: |- + (String) The name of the index where dashboard data is stored. + The name of the index where dashboard data is stored. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dashboard.opensearch.upbound.io_tenants.yaml b/package/crds/dashboard.opensearch.upbound.io_tenants.yaml new file mode 100644 index 0000000..7eff359 --- /dev/null +++ b/package/crds/dashboard.opensearch.upbound.io_tenants.yaml @@ -0,0 +1,352 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: tenants.dashboard.opensearch.upbound.io +spec: + group: dashboard.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Tenant + listKind: TenantList + plural: tenants + singular: tenant + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: 
.metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Tenant is the Schema for the Tenants API. Provides an OpenSearch + dashboard tenant resource. Please refer to the OpenSearch documentation + for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TenantSpec defines the desired state of Tenant + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: |- + (String) Description of the tenant. + Description of the tenant. + type: string + tenantName: + description: |- + (String) The name of the tenant. + The name of the tenant. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: |- + (String) Description of the tenant. + Description of the tenant. + type: string + tenantName: + description: |- + (String) The name of the tenant. + The name of the tenant. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.tenantName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tenantName) + || (has(self.initProvider) && has(self.initProvider.tenantName))' + status: + description: TenantStatus defines the observed state of Tenant. + properties: + atProvider: + properties: + description: + description: |- + (String) Description of the tenant. + Description of the tenant. + type: string + id: + description: (String) The ID of this resource. + type: string + index: + description: (String) + type: string + tenantName: + description: |- + (String) The name of the tenant. + The name of the tenant. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/data.opensearch.upbound.io_streams.yaml b/package/crds/data.opensearch.upbound.io_streams.yaml new file mode 100644 index 0000000..12d9be4 --- /dev/null +++ b/package/crds/data.opensearch.upbound.io_streams.yaml @@ -0,0 +1,334 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: streams.data.opensearch.upbound.io +spec: + group: data.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Stream + listKind: StreamList + plural: streams + singular: stream + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Stream is the Schema for the Streams API. A data stream lets + you store append-only time series data across multiple (hidden, auto-generated) + indices while giving you a single named resource for requests + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StreamSpec defines the desired state of Stream + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: |- + (String) Name of the data stream to create, must have a matching + Name of the data stream to create, must have a matching + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: |- + (String) Name of the data stream to create, must have a matching + Name of the data stream to create, must have a matching + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: StreamStatus defines the observed state of Stream. + properties: + atProvider: + properties: + id: + description: (String) The ID of this resource. + type: string + name: + description: |- + (String) Name of the data stream to create, must have a matching + Name of the data stream to create, must have a matching + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/index.opensearch.upbound.io_templates.yaml b/package/crds/index.opensearch.upbound.io_templates.yaml new file mode 100644 index 0000000..5c75ded --- /dev/null +++ b/package/crds/index.opensearch.upbound.io_templates.yaml @@ -0,0 +1,352 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: templates.index.opensearch.upbound.io +spec: + group: index.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Template + listKind: TemplateList + plural: templates + singular: template + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Template is the Schema for the Templates API. Provides an OpenSearch + index template resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TemplateSpec defines the desired state of Template + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The JSON body of the index template. + The JSON body of the index template. + type: string + name: + description: |- + (String) The name of the index template. + The name of the index template. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + body: + description: |- + (String) The JSON body of the index template. + The JSON body of the index template. + type: string + name: + description: |- + (String) The name of the index template. + The name of the index template. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: TemplateStatus defines the observed state of Template. + properties: + atProvider: + properties: + body: + description: |- + (String) The JSON body of the index template. + The JSON body of the index template. + type: string + id: + description: (String) The ID of this resource. + type: string + name: + description: |- + (String) The name of the index template. + The name of the index template. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ingest.opensearch.upbound.io_pipelines.yaml b/package/crds/ingest.opensearch.upbound.io_pipelines.yaml new file mode 100644 index 0000000..3bbec1e --- /dev/null +++ b/package/crds/ingest.opensearch.upbound.io_pipelines.yaml @@ -0,0 +1,352 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: pipelines.ingest.opensearch.upbound.io +spec: + group: ingest.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Pipeline + listKind: PipelineList + plural: pipelines + singular: pipeline + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Pipeline is the Schema for the Pipelines API. Provides an OpenSearch + ingest pipeline resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PipelineSpec defines the desired state of Pipeline + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The JSON body of the ingest pipeline + The JSON body of the ingest pipeline + type: string + name: + description: |- + (String) The name of the ingest pipeline + The name of the ingest pipeline + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + body: + description: |- + (String) The JSON body of the ingest pipeline + The JSON body of the ingest pipeline + type: string + name: + description: |- + (String) The name of the ingest pipeline + The name of the ingest pipeline + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: PipelineStatus defines the observed state of Pipeline. + properties: + atProvider: + properties: + body: + description: |- + (String) The JSON body of the ingest pipeline + The JSON body of the ingest pipeline + type: string + id: + description: (String) The ID of this resource. + type: string + name: + description: |- + (String) The name of the ingest pipeline + The name of the ingest pipeline + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ism.opensearch.upbound.io_policies.yaml b/package/crds/ism.opensearch.upbound.io_policies.yaml new file mode 100644 index 0000000..9e3d278 --- /dev/null +++ b/package/crds/ism.opensearch.upbound.io_policies.yaml @@ -0,0 +1,383 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: policies.ism.opensearch.upbound.io +spec: + group: ism.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Policy is the Schema for the Policys API. Provides an OpenSearch + Index State Management (ISM) policy. Please refer to the OpenSearch ISM + documentation for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PolicySpec defines the desired state of Policy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The policy document. + The policy document. + type: string + policyId: + description: |- + (String) The id of the ISM policy. + The id of the ISM policy. + type: string + primaryTerm: + description: |- + (Number) The primary term of the ISM policy version. + The primary term of the ISM policy version. + type: number + seqNo: + description: |- + (Number) The sequence number of the ISM policy version. + The sequence number of the ISM policy version. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + body: + description: |- + (String) The policy document. + The policy document. + type: string + policyId: + description: |- + (String) The id of the ISM policy. + The id of the ISM policy. + type: string + primaryTerm: + description: |- + (Number) The primary term of the ISM policy version. + The primary term of the ISM policy version. + type: number + seqNo: + description: |- + (Number) The sequence number of the ISM policy version. + The sequence number of the ISM policy version. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + - message: spec.forProvider.policyId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyId) + || (has(self.initProvider) && has(self.initProvider.policyId))' + status: + description: PolicyStatus defines the observed state of Policy. + properties: + atProvider: + properties: + body: + description: |- + (String) The policy document. + The policy document. + type: string + id: + description: (String) The ID of this resource. + type: string + policyId: + description: |- + (String) The id of the ISM policy. + The id of the ISM policy. 
+ type: string + primaryTerm: + description: |- + (Number) The primary term of the ISM policy version. + The primary term of the ISM policy version. + type: number + seqNo: + description: |- + (Number) The sequence number of the ISM policy version. + The sequence number of the ISM policy version. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/ism.opensearch.upbound.io_policymappings.yaml b/package/crds/ism.opensearch.upbound.io_policymappings.yaml new file mode 100644 index 0000000..81a333d --- /dev/null +++ b/package/crds/ism.opensearch.upbound.io_policymappings.yaml @@ -0,0 +1,422 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: policymappings.ism.opensearch.upbound.io +spec: + group: ism.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: PolicyMapping + listKind: PolicyMappingList + plural: policymappings + singular: policymapping + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PolicyMapping is the Schema for the PolicyMappings API. Provides + an OpenSearch Index State Management (ISM) policy. Please refer to the OpenSearch + ISM documentation for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PolicyMappingSpec defines the desired state of PolicyMapping + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + include: + description: |- + (Set of Map of String) When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + items: + additionalProperties: + type: string + type: object + type: array + indexes: + description: |- + (String) Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. 
+ Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. + type: string + isSafe: + description: (Boolean) + type: boolean + managedIndexes: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + policyId: + description: |- + (String) The name of the policy. + The name of the policy. + type: string + state: + description: |- + (String) After a change in policy takes place, specify the state for the index to transition to + After a change in policy takes place, specify the state for the index to transition to + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + include: + description: |- + (Set of Map of String) When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + items: + additionalProperties: + type: string + type: object + type: array + indexes: + description: |- + (String) Name of the index to apply the policy to. 
You can use an index pattern to update multiple indices at once. + Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. + type: string + isSafe: + description: (Boolean) + type: boolean + managedIndexes: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + policyId: + description: |- + (String) The name of the policy. + The name of the policy. + type: string + state: + description: |- + (String) After a change in policy takes place, specify the state for the index to transition to + After a change in policy takes place, specify the state for the index to transition to + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.indexes is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.indexes) + || (has(self.initProvider) && has(self.initProvider.indexes))' + - message: spec.forProvider.policyId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyId) + || (has(self.initProvider) && has(self.initProvider.policyId))' + status: + description: PolicyMappingStatus defines the observed state of PolicyMapping. + properties: + atProvider: + properties: + id: + description: (String) The ID of this resource. + type: string + include: + description: |- + (Set of Map of String) When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. + When updating multiple indices, you might want to include a state filter to only affect certain managed indices. The background process only applies the change if the index is currently in the state specified. 
+ items: + additionalProperties: + type: string + type: object + type: array + indexes: + description: |- + (String) Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. + Name of the index to apply the policy to. You can use an index pattern to update multiple indices at once. + type: string + isSafe: + description: (Boolean) + type: boolean + managedIndexes: + description: (Set of String) + items: + type: string + type: array + x-kubernetes-list-type: set + policyId: + description: |- + (String) The name of the policy. + The name of the policy. + type: string + state: + description: |- + (String) After a change in policy takes place, specify the state for the index to transition to + After a change in policy takes place, specify the state for the index to transition to + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/opensearch.opensearch.upbound.io_indices.yaml b/package/crds/opensearch.opensearch.upbound.io_indices.yaml new file mode 100644 index 0000000..8093119 --- /dev/null +++ b/package/crds/opensearch.opensearch.upbound.io_indices.yaml @@ -0,0 +1,1242 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: indices.opensearch.opensearch.upbound.io +spec: + group: opensearch.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Index + listKind: IndexList + plural: indices + singular: index + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Index is the Schema for the Indexs API. Provides an OpenSearch + index resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IndexSpec defines the desired state of Index + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aliases: + description: |- + (String) A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + type: string + analysisAnalyzer: + description: |- + (String) A JSON string describing the analyzers applied to the index. + A JSON string describing the analyzers applied to the index. 
+ type: string + analysisCharFilter: + description: |- + (String) A JSON string describing the char_filters applied to the index. + A JSON string describing the char_filters applied to the index. + type: string + analysisFilter: + description: |- + (String) A JSON string describing the filters applied to the index. + A JSON string describing the filters applied to the index. + type: string + analysisNormalizer: + description: |- + (String) A JSON string describing the normalizers applied to the index. + A JSON string describing the normalizers applied to the index. + type: string + analysisTokenizer: + description: |- + (String) A JSON string describing the tokenizers applied to the index. + A JSON string describing the tokenizers applied to the index. + type: string + analyzeMaxTokenCount: + description: |- + (String) The maximum number of tokens that can be produced using _analyze API. A stringified number. + The maximum number of tokens that can be produced using _analyze API. A stringified number. + type: string + autoExpandReplicas: + description: |- + 5) or use all for the upper bound (e.g. 0-all) + Set the number of replicas to the node count in the cluster. Set to a dash delimited lower and upper bound (e.g. 0-5) or use all for the upper bound (e.g. 0-all) + type: string + blocksMetadata: + description: |- + (Boolean) Set to true to disable index metadata reads and writes. + Set to `true` to disable index metadata reads and writes. + type: boolean + blocksRead: + description: |- + (Boolean) Set to true to disable read operations against the index. + Set to `true` to disable read operations against the index. + type: boolean + blocksReadOnly: + description: |- + (Boolean) Set to true to make the index and index metadata read only, false to allow writes and metadata changes. + Set to `true` to make the index and index metadata read only, `false` to allow writes and metadata changes. 
+ type: boolean + blocksReadOnlyAllowDelete: + description: |- + (Boolean) Identical to index.blocks.read_only but allows deleting the index to free up resources. + Identical to `index.blocks.read_only` but allows deleting the index to free up resources. + type: boolean + blocksWrite: + description: |- + (Boolean) Set to true to disable data write operations against the index. This setting does not affect metadata. + Set to `true` to disable data write operations against the index. This setting does not affect metadata. + type: boolean + codec: + description: |- + (String) The default value compresses stored data with LZ4 compression, but this can be set to best_compression which uses DEFLATE for a higher compression ratio. This can be set only on creation. + The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` which uses DEFLATE for a higher compression ratio. This can be set only on creation. + type: string + defaultPipeline: + description: |- + (String) The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + type: string + forceDestroy: + description: |- + (Boolean) A boolean that indicates that the index should be deleted even if it contains documents. + A boolean that indicates that the index should be deleted even if it contains documents. + type: boolean + gcDeletes: + description: |- + (String) The length of time that a deleted document's version number remains available for further versioned operations. + The length of time that a deleted document's version number remains available for further versioned operations. + type: string + highlightMaxAnalyzedOffset: + description: |- + (String) The maximum number of characters that will be analyzed for a highlight request. 
A stringified number. + The maximum number of characters that will be analyzed for a highlight request. A stringified number. + type: string + includeTypeName: + description: |- + (String) A string that indicates if and what we should pass to include_type_name parameter. Set to "false" when trying to create an index on a v6 cluster without a doc type or set to "true" when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + A string that indicates if and what we should pass to include_type_name parameter. Set to `"false"` when trying to create an index on a v6 cluster without a doc type or set to `"true"` when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + type: string + indexKnn: + description: |- + NN search functionality will be disabled. + Indicates whether the index should build native library indices for the knn_vector fields. If set to false, the knn_vector fields will be stored in doc values, but Approximate k-NN search functionality will be disabled. + type: boolean + indexKnnAlgoParamEfSearch: + description: |- + NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib implementation. + The size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib. + type: string + indexSimilarityDefault: + description: |- + (String) A JSON string describing the default index similarity config. + A JSON string describing the default index similarity config. 
+ type: string + indexingSlowlogLevel: + description: |- + (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + type: string + indexingSlowlogSource: + description: |- + (String) Set the number of characters of the _source to include in the slowlog lines, false or 0 will skip logging the source entirely and setting it to true will log the entire source regardless of size. The original _source is reformatted by default to make sure that it fits on a single log line. + Set the number of characters of the `_source` to include in the slowlog lines, `false` or `0` will skip logging the source entirely and setting it to `true` will log the entire source regardless of size. The original `_source` is reformatted by default to make sure that it fits on a single log line. + type: string + indexingSlowlogThresholdIndexDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `2s` + type: string + indexingSlowlogThresholdIndexInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 5s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `5s` + type: string + indexingSlowlogThresholdIndexTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 
`500ms` + type: string + indexingSlowlogThresholdIndexWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `10s` + type: string + loadFixedBitsetFiltersEagerly: + description: |- + loaded for nested queries. This can be set only on creation. + Indicates whether cached filters are pre-loaded for nested queries. This can be set only on creation. + type: boolean + mappings: + description: |- + (String) A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + type: string + maxDocvalueFieldsSearch: + description: |- + (String) The maximum number of docvalue_fields that are allowed in a query. A stringified number. + The maximum number of `docvalue_fields` that are allowed in a query. A stringified number. + type: string + maxInnerResultWindow: + description: |- + (String) The maximum value of from + size for inner hits definition and top hits aggregations to this index. A stringified number. + The maximum value of `from + size` for inner hits definition and top hits aggregations to this index. A stringified number. + type: string + maxNgramDiff: + description: |- + (String) The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. 
+ type: string + maxRefreshListeners: + description: |- + (String) Maximum number of refresh listeners available on each shard of the index. A stringified number. + Maximum number of refresh listeners available on each shard of the index. A stringified number. + type: string + maxRegexLength: + description: |- + (String) The maximum length of regex that can be used in Regexp Query. A stringified number. + The maximum length of regex that can be used in Regexp Query. A stringified number. + type: string + maxRescoreWindow: + description: |- + (String) The maximum value of window_size for rescore requests in searches of this index. A stringified number. + The maximum value of `window_size` for `rescore` requests in searches of this index. A stringified number. + type: string + maxResultWindow: + description: |- + (String) The maximum value of from + size for searches to this index. A stringified number. + The maximum value of `from + size` for searches to this index. A stringified number. + type: string + maxScriptFields: + description: |- + (String) The maximum number of script_fields that are allowed in a query. A stringified number. + The maximum number of `script_fields` that are allowed in a query. A stringified number. + type: string + maxShingleDiff: + description: |- + (String) The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + type: string + maxTermsCount: + description: |- + (String) The maximum number of terms that can be used in Terms Query. A stringified number. + The maximum number of terms that can be used in Terms Query. A stringified number. + type: string + name: + description: |- + (String) Name of the index to create + Name of the index to create + type: string + numberOfReplicas: + description: |- + (String) Number of shard replicas. 
A stringified number. + Number of shard replicas. A stringified number. + type: string + numberOfRoutingShards: + description: |- + (String) Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + type: string + numberOfShards: + description: |- + (String) Number of shards for the index. This can be set only on creation. + Number of shards for the index. This can be set only on creation. + type: string + refreshInterval: + description: |- + 1 to disable refresh. + How often to perform a refresh operation, which makes recent changes to the index visible to search. Can be set to `-1` to disable refresh. + type: string + rolloverAlias: + description: (String) + type: string + routingAllocationEnable: + description: |- + (String) Controls shard allocation for this index. It can be set to: all , primaries , new_primaries , none. + Controls shard allocation for this index. It can be set to: `all` , `primaries` , `new_primaries` , `none`. + type: string + routingPartitionSize: + description: |- + (String) The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + type: string + routingRebalanceEnable: + description: |- + (String) Enables shard rebalancing for this index. It can be set to: all, primaries , replicas , none. + Enables shard rebalancing for this index. It can be set to: `all`, `primaries` , `replicas` , `none`. + type: string + searchIdleAfter: + description: |- + (String) How long a shard can not receive a search or get request until it’s considered search idle. + How long a shard can not receive a search or get request until it’s considered search idle. 
+ type: string + searchSlowlogLevel: + description: |- + (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + type: string + searchSlowlogThresholdFetchDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `2s` + type: string + searchSlowlogThresholdFetchInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 5s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `5s` + type: string + searchSlowlogThresholdFetchTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `500ms` + type: string + searchSlowlogThresholdFetchWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `10s` + type: string + searchSlowlogThresholdQueryDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `2s` + type: string + searchSlowlogThresholdQueryInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 
5s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `5s` + type: string + searchSlowlogThresholdQueryTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `500ms` + type: string + searchSlowlogThresholdQueryWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `10s` + type: string + shardCheckOnStartup: + description: |- + (String) Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts false, true, checksum. + Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts `false`, `true`, `checksum`. + type: string + sortField: + description: |- + (String) The field to sort shards in this index by. + The field to sort shards in this index by. + type: string + sortOrder: + description: |- + (String) The direction to sort shards in. Accepts asc, desc. + The direction to sort shards in. Accepts `asc`, `desc`. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aliases: + description: |- + (String) A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + type: string + analysisAnalyzer: + description: |- + (String) A JSON string describing the analyzers applied to the index. + A JSON string describing the analyzers applied to the index. + type: string + analysisCharFilter: + description: |- + (String) A JSON string describing the char_filters applied to the index. + A JSON string describing the char_filters applied to the index. + type: string + analysisFilter: + description: |- + (String) A JSON string describing the filters applied to the index. + A JSON string describing the filters applied to the index. + type: string + analysisNormalizer: + description: |- + (String) A JSON string describing the normalizers applied to the index. + A JSON string describing the normalizers applied to the index. + type: string + analysisTokenizer: + description: |- + (String) A JSON string describing the tokenizers applied to the index. + A JSON string describing the tokenizers applied to the index. + type: string + analyzeMaxTokenCount: + description: |- + (String) The maximum number of tokens that can be produced using _analyze API. 
A stringified number. + The maximum number of tokens that can be produced using _analyze API. A stringified number. + type: string + autoExpandReplicas: + description: |- + 5) or use all for the upper bound (e.g. 0-all) + Set the number of replicas to the node count in the cluster. Set to a dash delimited lower and upper bound (e.g. 0-5) or use all for the upper bound (e.g. 0-all) + type: string + blocksMetadata: + description: |- + (Boolean) Set to true to disable index metadata reads and writes. + Set to `true` to disable index metadata reads and writes. + type: boolean + blocksRead: + description: |- + (Boolean) Set to true to disable read operations against the index. + Set to `true` to disable read operations against the index. + type: boolean + blocksReadOnly: + description: |- + (Boolean) Set to true to make the index and index metadata read only, false to allow writes and metadata changes. + Set to `true` to make the index and index metadata read only, `false` to allow writes and metadata changes. + type: boolean + blocksReadOnlyAllowDelete: + description: |- + (Boolean) Identical to index.blocks.read_only but allows deleting the index to free up resources. + Identical to `index.blocks.read_only` but allows deleting the index to free up resources. + type: boolean + blocksWrite: + description: |- + (Boolean) Set to true to disable data write operations against the index. This setting does not affect metadata. + Set to `true` to disable data write operations against the index. This setting does not affect metadata. + type: boolean + codec: + description: |- + (String) The default value compresses stored data with LZ4 compression, but this can be set to best_compression which uses DEFLATE for a higher compression ratio. This can be set only on creation. + The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` which uses DEFLATE for a higher compression ratio. This can be set only on creation. 
+ type: string + defaultPipeline: + description: |- + (String) The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + type: string + forceDestroy: + description: |- + (Boolean) A boolean that indicates that the index should be deleted even if it contains documents. + A boolean that indicates that the index should be deleted even if it contains documents. + type: boolean + gcDeletes: + description: |- + (String) The length of time that a deleted document's version number remains available for further versioned operations. + The length of time that a deleted document's version number remains available for further versioned operations. + type: string + highlightMaxAnalyzedOffset: + description: |- + (String) The maximum number of characters that will be analyzed for a highlight request. A stringified number. + The maximum number of characters that will be analyzed for a highlight request. A stringified number. + type: string + includeTypeName: + description: |- + (String) A string that indicates if and what we should pass to include_type_name parameter. Set to "false" when trying to create an index on a v6 cluster without a doc type or set to "true" when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + A string that indicates if and what we should pass to include_type_name parameter. Set to `"false"` when trying to create an index on a v6 cluster without a doc type or set to `"true"` when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + type: string + indexKnn: + description: |- + NN search functionality will be disabled. 
+ Indicates whether the index should build native library indices for the knn_vector fields. If set to false, the knn_vector fields will be stored in doc values, but Approximate k-NN search functionality will be disabled. + type: boolean + indexKnnAlgoParamEfSearch: + description: |- + NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib implementation. + The size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib. + type: string + indexSimilarityDefault: + description: |- + (String) A JSON string describing the default index similarity config. + A JSON string describing the default index similarity config. + type: string + indexingSlowlogLevel: + description: |- + (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + type: string + indexingSlowlogSource: + description: |- + (String) Set the number of characters of the _source to include in the slowlog lines, false or 0 will skip logging the source entirely and setting it to true will log the entire source regardless of size. The original _source is reformatted by default to make sure that it fits on a single log line. + Set the number of characters of the `_source` to include in the slowlog lines, `false` or `0` will skip logging the source entirely and setting it to `true` will log the entire source regardless of size. The original `_source` is reformatted by default to make sure that it fits on a single log line. + type: string + indexingSlowlogThresholdIndexDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 
`2s` + type: string + indexingSlowlogThresholdIndexInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 5s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `5s` + type: string + indexingSlowlogThresholdIndexTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `500ms` + type: string + indexingSlowlogThresholdIndexWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `10s` + type: string + loadFixedBitsetFiltersEagerly: + description: |- + loaded for nested queries. This can be set only on creation. + Indicates whether cached filters are pre-loaded for nested queries. This can be set only on creation. + type: boolean + mappings: + description: |- + (String) A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + type: string + maxDocvalueFieldsSearch: + description: |- + (String) The maximum number of docvalue_fields that are allowed in a query. A stringified number. + The maximum number of `docvalue_fields` that are allowed in a query. A stringified number. 
+ type: string + maxInnerResultWindow: + description: |- + (String) The maximum value of from + size for inner hits definition and top hits aggregations to this index. A stringified number. + The maximum value of `from + size` for inner hits definition and top hits aggregations to this index. A stringified number. + type: string + maxNgramDiff: + description: |- + (String) The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + type: string + maxRefreshListeners: + description: |- + (String) Maximum number of refresh listeners available on each shard of the index. A stringified number. + Maximum number of refresh listeners available on each shard of the index. A stringified number. + type: string + maxRegexLength: + description: |- + (String) The maximum length of regex that can be used in Regexp Query. A stringified number. + The maximum length of regex that can be used in Regexp Query. A stringified number. + type: string + maxRescoreWindow: + description: |- + (String) The maximum value of window_size for rescore requests in searches of this index. A stringified number. + The maximum value of `window_size` for `rescore` requests in searches of this index. A stringified number. + type: string + maxResultWindow: + description: |- + (String) The maximum value of from + size for searches to this index. A stringified number. + The maximum value of `from + size` for searches to this index. A stringified number. + type: string + maxScriptFields: + description: |- + (String) The maximum number of script_fields that are allowed in a query. A stringified number. + The maximum number of `script_fields` that are allowed in a query. A stringified number. 
+ type: string + maxShingleDiff: + description: |- + (String) The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + type: string + maxTermsCount: + description: |- + (String) The maximum number of terms that can be used in Terms Query. A stringified number. + The maximum number of terms that can be used in Terms Query. A stringified number. + type: string + name: + description: |- + (String) Name of the index to create + Name of the index to create + type: string + numberOfReplicas: + description: |- + (String) Number of shard replicas. A stringified number. + Number of shard replicas. A stringified number. + type: string + numberOfRoutingShards: + description: |- + (String) Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + type: string + numberOfShards: + description: |- + (String) Number of shards for the index. This can be set only on creation. + Number of shards for the index. This can be set only on creation. + type: string + refreshInterval: + description: |- + 1 to disable refresh. + How often to perform a refresh operation, which makes recent changes to the index visible to search. Can be set to `-1` to disable refresh. + type: string + rolloverAlias: + description: (String) + type: string + routingAllocationEnable: + description: |- + (String) Controls shard allocation for this index. It can be set to: all , primaries , new_primaries , none. + Controls shard allocation for this index. It can be set to: `all` , `primaries` , `new_primaries` , `none`. 
+ type: string + routingPartitionSize: + description: |- + (String) The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + type: string + routingRebalanceEnable: + description: |- + (String) Enables shard rebalancing for this index. It can be set to: all, primaries , replicas , none. + Enables shard rebalancing for this index. It can be set to: `all`, `primaries` , `replicas` , `none`. + type: string + searchIdleAfter: + description: |- + (String) How long a shard can not receive a search or get request until it’s considered search idle. + How long a shard can not receive a search or get request until it’s considered search idle. + type: string + searchSlowlogLevel: + description: |- + (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + type: string + searchSlowlogThresholdFetchDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `2s` + type: string + searchSlowlogThresholdFetchInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 5s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `5s` + type: string + searchSlowlogThresholdFetchTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 
`500ms` + type: string + searchSlowlogThresholdFetchWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `10s` + type: string + searchSlowlogThresholdQueryDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `2s` + type: string + searchSlowlogThresholdQueryInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 5s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `5s` + type: string + searchSlowlogThresholdQueryTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `500ms` + type: string + searchSlowlogThresholdQueryWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `10s` + type: string + shardCheckOnStartup: + description: |- + (String) Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts false, true, checksum. + Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts `false`, `true`, `checksum`. 
+ type: string + sortField: + description: |- + (String) The field to sort shards in this index by. + The field to sort shards in this index by. + type: string + sortOrder: + description: |- + (String) The direction to sort shards in. Accepts asc, desc. + The direction to sort shards in. Accepts `asc`, `desc`. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: IndexStatus defines the observed state of Index. + properties: + atProvider: + properties: + aliases: + description: |- + (String) A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + A JSON string describing a set of aliases. The index aliases API allows aliasing an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be mapped to more than one index, and when specifying it, the alias will automatically expand to the aliased indices. + type: string + analysisAnalyzer: + description: |- + (String) A JSON string describing the analyzers applied to the index. + A JSON string describing the analyzers applied to the index. + type: string + analysisCharFilter: + description: |- + (String) A JSON string describing the char_filters applied to the index. + A JSON string describing the char_filters applied to the index. + type: string + analysisFilter: + description: |- + (String) A JSON string describing the filters applied to the index. + A JSON string describing the filters applied to the index. + type: string + analysisNormalizer: + description: |- + (String) A JSON string describing the normalizers applied to the index. + A JSON string describing the normalizers applied to the index. 
+ type: string + analysisTokenizer: + description: |- + (String) A JSON string describing the tokenizers applied to the index. + A JSON string describing the tokenizers applied to the index. + type: string + analyzeMaxTokenCount: + description: |- + (String) The maximum number of tokens that can be produced using _analyze API. A stringified number. + The maximum number of tokens that can be produced using _analyze API. A stringified number. + type: string + autoExpandReplicas: + description: |- + 5) or use all for the upper bound (e.g. 0-all) + Set the number of replicas to the node count in the cluster. Set to a dash delimited lower and upper bound (e.g. 0-5) or use all for the upper bound (e.g. 0-all) + type: string + blocksMetadata: + description: |- + (Boolean) Set to true to disable index metadata reads and writes. + Set to `true` to disable index metadata reads and writes. + type: boolean + blocksRead: + description: |- + (Boolean) Set to true to disable read operations against the index. + Set to `true` to disable read operations against the index. + type: boolean + blocksReadOnly: + description: |- + (Boolean) Set to true to make the index and index metadata read only, false to allow writes and metadata changes. + Set to `true` to make the index and index metadata read only, `false` to allow writes and metadata changes. + type: boolean + blocksReadOnlyAllowDelete: + description: |- + (Boolean) Identical to index.blocks.read_only but allows deleting the index to free up resources. + Identical to `index.blocks.read_only` but allows deleting the index to free up resources. + type: boolean + blocksWrite: + description: |- + (Boolean) Set to true to disable data write operations against the index. This setting does not affect metadata. + Set to `true` to disable data write operations against the index. This setting does not affect metadata. 
+ type: boolean + codec: + description: |- + (String) The default value compresses stored data with LZ4 compression, but this can be set to best_compression which uses DEFLATE for a higher compression ratio. This can be set only on creation. + The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` which uses DEFLATE for a higher compression ratio. This can be set only on creation. + type: string + defaultPipeline: + description: |- + (String) The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + The default ingest node pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. + type: string + forceDestroy: + description: |- + (Boolean) A boolean that indicates that the index should be deleted even if it contains documents. + A boolean that indicates that the index should be deleted even if it contains documents. + type: boolean + gcDeletes: + description: |- + (String) The length of time that a deleted document's version number remains available for further versioned operations. + The length of time that a deleted document's version number remains available for further versioned operations. + type: string + highlightMaxAnalyzedOffset: + description: |- + (String) The maximum number of characters that will be analyzed for a highlight request. A stringified number. + The maximum number of characters that will be analyzed for a highlight request. A stringified number. + type: string + id: + description: (String) The ID of this resource. + type: string + includeTypeName: + description: |- + (String) A string that indicates if and what we should pass to include_type_name parameter. Set to "false" when trying to create an index on a v6 cluster without a doc type or set to "true" when trying to create an index on a v7 cluster with a doc type. 
Since mapping updates are not currently supported, this applies only on index create. + A string that indicates if and what we should pass to include_type_name parameter. Set to `"false"` when trying to create an index on a v6 cluster without a doc type or set to `"true"` when trying to create an index on a v7 cluster with a doc type. Since mapping updates are not currently supported, this applies only on index create. + type: string + indexKnn: + description: |- + NN search functionality will be disabled. + Indicates whether the index should build native library indices for the knn_vector fields. If set to false, the knn_vector fields will be stored in doc values, but Approximate k-NN search functionality will be disabled. + type: boolean + indexKnnAlgoParamEfSearch: + description: |- + NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib implementation. + The size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches. Only available for nmslib. + type: string + indexSimilarityDefault: + description: |- + (String) A JSON string describing the default index similarity config. + A JSON string describing the default index similarity config. + type: string + indexingSlowlogLevel: + description: |- + (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + type: string + indexingSlowlogSource: + description: |- + (String) Set the number of characters of the _source to include in the slowlog lines, false or 0 will skip logging the source entirely and setting it to true will log the entire source regardless of size. The original _source is reformatted by default to make sure that it fits on a single log line. 
+ Set the number of characters of the `_source` to include in the slowlog lines, `false` or `0` will skip logging the source entirely and setting it to `true` will log the entire source regardless of size. The original `_source` is reformatted by default to make sure that it fits on a single log line. + type: string + indexingSlowlogThresholdIndexDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `2s` + type: string + indexingSlowlogThresholdIndexInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 5s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `5s` + type: string + indexingSlowlogThresholdIndexTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `500ms` + type: string + indexingSlowlogThresholdIndexWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches for indexing queries, in time units, e.g. `10s` + type: string + loadFixedBitsetFiltersEagerly: + description: |- + loaded for nested queries. This can be set only on creation. + Indicates whether cached filters are pre-loaded for nested queries. This can be set only on creation. + type: boolean + mappings: + description: |- + (String) A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. 
To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + A JSON string defining how documents in the index, and the fields they contain, are stored and indexed. To avoid the complexities of field mapping updates, updates of this field are not allowed via this provider. + type: string + maxDocvalueFieldsSearch: + description: |- + (String) The maximum number of docvalue_fields that are allowed in a query. A stringified number. + The maximum number of `docvalue_fields` that are allowed in a query. A stringified number. + type: string + maxInnerResultWindow: + description: |- + (String) The maximum value of from + size for inner hits definition and top hits aggregations to this index. A stringified number. + The maximum value of `from + size` for inner hits definition and top hits aggregations to this index. A stringified number. + type: string + maxNgramDiff: + description: |- + (String) The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + The maximum allowed difference between min_gram and max_gram for NGramTokenizer and NGramTokenFilter. A stringified number. + type: string + maxRefreshListeners: + description: |- + (String) Maximum number of refresh listeners available on each shard of the index. A stringified number. + Maximum number of refresh listeners available on each shard of the index. A stringified number. + type: string + maxRegexLength: + description: |- + (String) The maximum length of regex that can be used in Regexp Query. A stringified number. + The maximum length of regex that can be used in Regexp Query. A stringified number. + type: string + maxRescoreWindow: + description: |- + (String) The maximum value of window_size for rescore requests in searches of this index. A stringified number. + The maximum value of `window_size` for `rescore` requests in searches of this index. A stringified number. 
+ type: string + maxResultWindow: + description: |- + (String) The maximum value of from + size for searches to this index. A stringified number. + The maximum value of `from + size` for searches to this index. A stringified number. + type: string + maxScriptFields: + description: |- + (String) The maximum number of script_fields that are allowed in a query. A stringified number. + The maximum number of `script_fields` that are allowed in a query. A stringified number. + type: string + maxShingleDiff: + description: |- + (String) The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + The maximum allowed difference between max_shingle_size and min_shingle_size for ShingleTokenFilter. A stringified number. + type: string + maxTermsCount: + description: |- + (String) The maximum number of terms that can be used in Terms Query. A stringified number. + The maximum number of terms that can be used in Terms Query. A stringified number. + type: string + name: + description: |- + (String) Name of the index to create + Name of the index to create + type: string + numberOfReplicas: + description: |- + (String) Number of shard replicas. A stringified number. + Number of shard replicas. A stringified number. + type: string + numberOfRoutingShards: + description: |- + (String) Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + Value used with number_of_shards to route documents to a primary shard. A stringified number. This can be set only on creation. + type: string + numberOfShards: + description: |- + (String) Number of shards for the index. This can be set only on creation. + Number of shards for the index. This can be set only on creation. + type: string + refreshInterval: + description: |- + 1 to disable refresh. + How often to perform a refresh operation, which makes recent changes to the index visible to search. 
Can be set to `-1` to disable refresh. + type: string + rolloverAlias: + description: (String) + type: string + routingAllocationEnable: + description: |- + (String) Controls shard allocation for this index. It can be set to: all , primaries , new_primaries , none. + Controls shard allocation for this index. It can be set to: `all` , `primaries` , `new_primaries` , `none`. + type: string + routingPartitionSize: + description: |- + (String) The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + The number of shards a custom routing value can go to. A stringified number. This can be set only on creation. + type: string + routingRebalanceEnable: + description: |- + (String) Enables shard rebalancing for this index. It can be set to: all, primaries , replicas , none. + Enables shard rebalancing for this index. It can be set to: `all`, `primaries` , `replicas` , `none`. + type: string + searchIdleAfter: + description: |- + (String) How long a shard can not receive a search or get request until it’s considered search idle. + How long a shard can not receive a search or get request until it’s considered search idle. + type: string + searchSlowlogLevel: + description: |- + (String) Set which logging level to use for the search slow log, can be: warn, info, debug, trace + Set which logging level to use for the search slow log, can be: `warn`, `info`, `debug`, `trace` + type: string + searchSlowlogThresholdFetchDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `2s` + type: string + searchSlowlogThresholdFetchInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 
5s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `5s` + type: string + searchSlowlogThresholdFetchTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `500ms` + type: string + searchSlowlogThresholdFetchWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches in the fetch phase, in time units, e.g. `10s` + type: string + searchSlowlogThresholdQueryDebug: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 2s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `2s` + type: string + searchSlowlogThresholdQueryInfo: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 5s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `5s` + type: string + searchSlowlogThresholdQueryTrace: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 500ms + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. `500ms` + type: string + searchSlowlogThresholdQueryWarn: + description: |- + (String) Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 10s + Set the cutoff for shard level slow search logging of slow searches in the query phase, in time units, e.g. 
`10s` + type: string + shardCheckOnStartup: + description: |- + (String) Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts false, true, checksum. + Whether or not shards should be checked for corruption before opening. When corruption is detected, it will prevent the shard from being opened. Accepts `false`, `true`, `checksum`. + type: string + sortField: + description: |- + (String) The field to sort shards in this index by. + The field to sort shards in this index by. + type: string + sortOrder: + description: |- + (String) The direction to sort shards in. Accepts asc, desc. + The direction to sort shards in. Accepts `asc`, `desc`. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/opensearch.opensearch.upbound.io_monitors.yaml b/package/crds/opensearch.opensearch.upbound.io_monitors.yaml new file mode 100644 index 0000000..fcd2fd0 --- /dev/null +++ b/package/crds/opensearch.opensearch.upbound.io_monitors.yaml @@ -0,0 +1,333 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: monitors.opensearch.opensearch.upbound.io +spec: + group: opensearch.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Monitor + listKind: MonitorList + plural: monitors + singular: monitor + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Monitor is the Schema for the Monitors API. Provides an OpenSearch + monitor. Please refer to the OpenSearch monitor documentation for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorSpec defines the desired state of Monitor + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The monitor document + The monitor document + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + body: + description: |- + (String) The monitor document + The monitor document + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + status: + description: MonitorStatus defines the observed state of Monitor. + properties: + atProvider: + properties: + body: + description: |- + (String) The monitor document + The monitor document + type: string + id: + description: (String) The ID of this resource. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/opensearch.opensearch.upbound.io_roles.yaml b/package/crds/opensearch.opensearch.upbound.io_roles.yaml new file mode 100644 index 0000000..1b5b569 --- /dev/null +++ b/package/crds/opensearch.opensearch.upbound.io_roles.yaml @@ -0,0 +1,580 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: roles.opensearch.opensearch.upbound.io +spec: + group: opensearch.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Role + listKind: RoleList + plural: roles + singular: role + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Role is the Schema for the Roles API. Provides an OpenSearch + security role resource. Please refer to the OpenSearch Access Control documentation + for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RoleSpec defines the desired state of Role + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterPermissions: + description: |- + (Set of String) A list of cluster permissions. + A list of cluster permissions. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the role. + Description of the role. + type: string + indexPermissions: + description: |- + (Block Set) A configuration of index permissions (see below for nested schema) + A configuration of index permissions + items: + properties: + allowedActions: + description: |- + (Set of String) A list of allowed actions. + A list of allowed actions. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + documentLevelSecurity: + description: |- + level security (json formatted using jsonencode). + A selector for document-level security (json formatted using jsonencode). + type: string + fieldLevelSecurity: + description: |- + level security. + A list of selectors for field-level security. + items: + type: string + type: array + x-kubernetes-list-type: set + indexPatterns: + description: |- + (Set of String) A list of glob patterns for the index names. + A list of glob patterns for the index names. + items: + type: string + type: array + x-kubernetes-list-type: set + maskedFields: + description: |- + (Set of String) A list of masked fields + A list of masked fields + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + roleName: + description: |- + (String) The name of the security role. + The name of the security role. + type: string + tenantPermissions: + description: |- + (Block Set) A configuration of tenant permissions (see below for nested schema) + A configuration of tenant permissions + items: + properties: + allowedActions: + description: |- + (Set of String) A list of allowed actions. + A list of allowed actions. + items: + type: string + type: array + x-kubernetes-list-type: set + tenantPatterns: + description: |- + (Set of String) A list of glob patterns for the tenant names + A list of glob patterns for the tenant names + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterPermissions: + description: |- + (Set of String) A list of cluster permissions. + A list of cluster permissions. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the role. + Description of the role. + type: string + indexPermissions: + description: |- + (Block Set) A configuration of index permissions (see below for nested schema) + A configuration of index permissions + items: + properties: + allowedActions: + description: |- + (Set of String) A list of allowed actions. + A list of allowed actions. + items: + type: string + type: array + x-kubernetes-list-type: set + documentLevelSecurity: + description: |- + level security (json formatted using jsonencode). + A selector for document-level security (json formatted using jsonencode). + type: string + fieldLevelSecurity: + description: |- + level security. + A list of selectors for field-level security. + items: + type: string + type: array + x-kubernetes-list-type: set + indexPatterns: + description: |- + (Set of String) A list of glob patterns for the index names. + A list of glob patterns for the index names. + items: + type: string + type: array + x-kubernetes-list-type: set + maskedFields: + description: |- + (Set of String) A list of masked fields + A list of masked fields + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + roleName: + description: |- + (String) The name of the security role. + The name of the security role. 
+ type: string + tenantPermissions: + description: |- + (Block Set) A configuration of tenant permissions (see below for nested schema) + A configuration of tenant permissions + items: + properties: + allowedActions: + description: |- + (Set of String) A list of allowed actions. + A list of allowed actions. + items: + type: string + type: array + x-kubernetes-list-type: set + tenantPatterns: + description: |- + (Set of String) A list of glob patterns for the tenant names + A list of glob patterns for the tenant names + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.roleName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.roleName) + || (has(self.initProvider) && has(self.initProvider.roleName))' + status: + description: RoleStatus defines the observed state of Role. + properties: + atProvider: + properties: + clusterPermissions: + description: |- + (Set of String) A list of cluster permissions. + A list of cluster permissions. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the role. + Description of the role. + type: string + id: + description: (String) The ID of this resource. + type: string + indexPermissions: + description: |- + (Block Set) A configuration of index permissions (see below for nested schema) + A configuration of index permissions + items: + properties: + allowedActions: + description: |- + (Set of String) A list of allowed actions. + A list of allowed actions. + items: + type: string + type: array + x-kubernetes-list-type: set + documentLevelSecurity: + description: |- + level security (json formatted using jsonencode). + A selector for document-level security (json formatted using jsonencode). + type: string + fieldLevelSecurity: + description: |- + level security. + A list of selectors for field-level security. + items: + type: string + type: array + x-kubernetes-list-type: set + indexPatterns: + description: |- + (Set of String) A list of glob patterns for the index names. + A list of glob patterns for the index names. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + maskedFields: + description: |- + (Set of String) A list of masked fields + A list of masked fields + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + roleName: + description: |- + (String) The name of the security role. + The name of the security role. + type: string + tenantPermissions: + description: |- + (Block Set) A configuration of tenant permissions (see below for nested schema) + A configuration of tenant permissions + items: + properties: + allowedActions: + description: |- + (Set of String) A list of allowed actions. + A list of allowed actions. + items: + type: string + type: array + x-kubernetes-list-type: set + tenantPatterns: + description: |- + (Set of String) A list of glob patterns for the tenant names + A list of glob patterns for the tenant names + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/opensearch.opensearch.upbound.io_scripts.yaml b/package/crds/opensearch.opensearch.upbound.io_scripts.yaml new file mode 100644 index 0000000..f98dfbf --- /dev/null +++ b/package/crds/opensearch.opensearch.upbound.io_scripts.yaml @@ -0,0 +1,367 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: scripts.opensearch.opensearch.upbound.io +spec: + group: opensearch.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Script + listKind: ScriptList + plural: scripts + singular: script + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Script is the Schema for the Scripts API. Provides an OpenSearch + script resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScriptSpec defines the desired state of Script + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + lang: + description: |- + (String) Specifies the language the script is written in. Defaults to painless. + Specifies the language the script is written in. Defaults to painless. + type: string + scriptId: + description: |- + (String) Identifier for the stored script. Must be unique within the cluster. + Identifier for the stored script. Must be unique within the cluster. + type: string + source: + description: |- + (String) The source of the stored script + The source of the stored script + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + lang: + description: |- + (String) Specifies the language the script is written in. Defaults to painless. + Specifies the language the script is written in. Defaults to painless. + type: string + scriptId: + description: |- + (String) Identifier for the stored script. Must be unique within the cluster. + Identifier for the stored script. Must be unique within the cluster. + type: string + source: + description: |- + (String) The source of the stored script + The source of the stored script + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.scriptId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scriptId) + || (has(self.initProvider) && has(self.initProvider.scriptId))' + - message: spec.forProvider.source is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.source) + || (has(self.initProvider) && has(self.initProvider.source))' + status: + description: ScriptStatus defines the observed state of Script. + properties: + atProvider: + properties: + id: + description: (String) The ID of this resource. + type: string + lang: + description: |- + (String) Specifies the language the script is written in. Defaults to painless. + Specifies the language the script is written in. Defaults to painless. + type: string + scriptId: + description: |- + (String) Identifier for the stored script. Must be unique within the cluster. + Identifier for the stored script. Must be unique within the cluster. + type: string + source: + description: |- + (String) The source of the stored script + The source of the stored script + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/opensearch.opensearch.upbound.io_users.yaml b/package/crds/opensearch.opensearch.upbound.io_users.yaml new file mode 100644 index 0000000..5e5252a --- /dev/null +++ b/package/crds/opensearch.opensearch.upbound.io_users.yaml @@ -0,0 +1,435 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: users.opensearch.opensearch.upbound.io +spec: + group: opensearch.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: User + listKind: UserList + plural: users + singular: user + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: 
.metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: User is the Schema for the Users API. Provides an OpenSearch + security user. Please refer to the OpenSearch Access Control documentation + for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserSpec defines the desired state of User + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + attributes: + additionalProperties: + type: string + description: |- + (Map of String) A map of arbitrary key value string pairs stored alongside of users. + A map of arbitrary key value string pairs stored alongside of users. + type: object + x-kubernetes-map-type: granular + backendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the user. + Description of the user. + type: string + passwordHashSecretRef: + description: |- + hashed password for the user, cannot be specified with password. + The pre-hashed password for the user, cannot be specified with `password`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + passwordSecretRef: + description: |- + descriptive HTTP 400 Bad Request error. For AWS OpenSearch domains "password must be at least 8 characters long and contain at least one uppercase letter, one lowercase letter, one digit, and one special character". + The plain text password for the user, cannot be specified with `password_hash`. Some implementations may enforce a password policy. Invalid passwords may cause a non-descriptive HTTP 400 Bad Request error. For AWS OpenSearch domains "password must be at least 8 characters long and contain at least one uppercase letter, one lowercase letter, one digit, and one special character". + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: |- + (String) The name of the security user. + The name of the security user. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + attributes: + additionalProperties: + type: string + description: |- + (Map of String) A map of arbitrary key value string pairs stored alongside of users. + A map of arbitrary key value string pairs stored alongside of users. + type: object + x-kubernetes-map-type: granular + backendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the user. + Description of the user. + type: string + username: + description: |- + (String) The name of the security user. + The name of the security user. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.username is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.username) + || (has(self.initProvider) && has(self.initProvider.username))' + status: + description: UserStatus defines the observed state of User. + properties: + atProvider: + properties: + attributes: + additionalProperties: + type: string + description: |- + (Map of String) A map of arbitrary key value string pairs stored alongside of users. 
+ A map of arbitrary key value string pairs stored alongside of users. + type: object + x-kubernetes-map-type: granular + backendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the user. + Description of the user. + type: string + id: + description: (String) The ID of this resource. + type: string + username: + description: |- + (String) The name of the security user. + The name of the security user. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/roles.opensearch.upbound.io_mappings.yaml b/package/crds/roles.opensearch.upbound.io_mappings.yaml new file mode 100644 index 0000000..2143b75 --- /dev/null +++ b/package/crds/roles.opensearch.upbound.io_mappings.yaml @@ -0,0 +1,744 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: mappings.roles.opensearch.upbound.io +spec: + group: roles.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Mapping + listKind: MappingList + plural: mappings + singular: mapping + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Mapping is the Schema for the Mappings API. Provides an OpenSearch + security role mapping. Please refer to the OpenSearch Access Control documentation + for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MappingSpec defines the desired state of Mapping + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + andBackendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + backendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the role mapping. + Description of the role mapping. + type: string + hosts: + description: |- + (Set of String) A list of host names. + A list of host names. + items: + type: string + type: array + x-kubernetes-list-type: set + roleName: + description: |- + (String) The name of the security role. 
+ The name of the security role. + type: string + roleNameRef: + description: Reference to a Role in opensearch to populate roleName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleNameSelector: + description: Selector for a Role in opensearch to populate roleName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + users: + description: |- + (Set of String) A list of users. + A list of users. + items: + type: string + type: array + x-kubernetes-list-type: set + usersRefs: + description: References to User in opensearch to populate users. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + usersSelector: + description: Selector for a list of User in opensearch to populate + users. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + andBackendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + backendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the role mapping. + Description of the role mapping. + type: string + hosts: + description: |- + (Set of String) A list of host names. + A list of host names. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + roleName: + description: |- + (String) The name of the security role. + The name of the security role. + type: string + roleNameRef: + description: Reference to a Role in opensearch to populate roleName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleNameSelector: + description: Selector for a Role in opensearch to populate roleName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + users: + description: |- + (Set of String) A list of users. + A list of users. + items: + type: string + type: array + x-kubernetes-list-type: set + usersRefs: + description: References to User in opensearch to populate users. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + usersSelector: + description: Selector for a list of User in opensearch to populate + users. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MappingStatus defines the observed state of Mapping. + properties: + atProvider: + properties: + andBackendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + backendRoles: + description: |- + (Set of String) A list of backend roles. + A list of backend roles. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: |- + (String) Description of the role mapping. + Description of the role mapping. + type: string + hosts: + description: |- + (Set of String) A list of host names. + A list of host names. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: (String) The ID of this resource. + type: string + roleName: + description: |- + (String) The name of the security role. + The name of the security role. + type: string + users: + description: |- + (Set of String) A list of users. + A list of users. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/sm.opensearch.upbound.io_policies.yaml b/package/crds/sm.opensearch.upbound.io_policies.yaml new file mode 100644 index 0000000..bae375f --- /dev/null +++ b/package/crds/sm.opensearch.upbound.io_policies.yaml @@ -0,0 +1,383 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: policies.sm.opensearch.upbound.io +spec: + group: sm.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Policy is the Schema for the Policys API. 
Provides an OpenSearch + Snapshot Management (SM) policy. Please refer to the OpenSearch SM documentation + for details. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PolicySpec defines the desired state of Policy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + body: + description: |- + (String) The policy document. + The policy document. + type: string + policyName: + description: |- + (String) The name of the SM policy. + The name of the SM policy. + type: string + primaryTerm: + description: |- + (Number) The primary term of the SM policy version. + The primary term of the SM policy version. 
+ type: number + seqNo: + description: |- + (Number) The sequence number of the SM policy version. + The sequence number of the SM policy version. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + body: + description: |- + (String) The policy document. + The policy document. + type: string + policyName: + description: |- + (String) The name of the SM policy. + The name of the SM policy. + type: string + primaryTerm: + description: |- + (Number) The primary term of the SM policy version. + The primary term of the SM policy version. + type: number + seqNo: + description: |- + (Number) The sequence number of the SM policy version. + The sequence number of the SM policy version. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.body is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.body) + || (has(self.initProvider) && has(self.initProvider.body))' + - message: spec.forProvider.policyName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyName) + || (has(self.initProvider) && has(self.initProvider.policyName))' + status: + description: PolicyStatus defines the observed state of Policy. + properties: + atProvider: + properties: + body: + description: |- + (String) The policy document. + The policy document. + type: string + id: + description: (String) The ID of this resource. + type: string + policyName: + description: |- + (String) The name of the SM policy. + The name of the SM policy. 
+ type: string + primaryTerm: + description: |- + (Number) The primary term of the SM policy version. + The primary term of the SM policy version. + type: number + seqNo: + description: |- + (Number) The sequence number of the SM policy version. + The sequence number of the SM policy version. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/snapshot.opensearch.upbound.io_repositories.yaml b/package/crds/snapshot.opensearch.upbound.io_repositories.yaml new file mode 100644 index 0000000..1de0ac3 --- /dev/null +++ b/package/crds/snapshot.opensearch.upbound.io_repositories.yaml @@ -0,0 +1,376 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: repositories.snapshot.opensearch.upbound.io +spec: + group: snapshot.opensearch.upbound.io + names: + categories: + - crossplane + - managed + - opensearch + kind: Repository + listKind: RepositoryList + plural: repositories + singular: repository + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Repository is the Schema for the Repositories API. Provides an + OpenSearch snapshot repository resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RepositorySpec defines the desired state of Repository + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: |- + (String) The name of the repository. + The name of the repository. + type: string + settings: + additionalProperties: + type: string + description: |- + (Map of String) The settings map applicable for the backend, see official documentation for plugins. + The settings map applicable for the backend, see official documentation for plugins. + type: object + x-kubernetes-map-type: granular + type: + description: |- + (String) The name of the repository backend . + The name of the repository backend (required plugins must be installed). + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: |- + (String) The name of the repository. + The name of the repository. + type: string + settings: + additionalProperties: + type: string + description: |- + (Map of String) The settings map applicable for the backend, see official documentation for plugins. + The settings map applicable for the backend, see official documentation for plugins. + type: object + x-kubernetes-map-type: granular + type: + description: |- + (String) The name of the repository backend . + The name of the repository backend (required plugins must be installed). + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: RepositoryStatus defines the observed state of Repository. + properties: + atProvider: + properties: + id: + description: (String) The ID of this resource. + type: string + name: + description: |- + (String) The name of the repository. + The name of the repository. 
+ type: string + settings: + additionalProperties: + type: string + description: |- + (Map of String) The settings map applicable for the backend, see official documentation for plugins. + The settings map applicable for the backend, see official documentation for plugins. + type: object + x-kubernetes-map-type: granular + type: + description: |- + (String) The name of the repository backend . + The name of the repository backend (required plugins must be installed). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}