Merge branch 'master' of https://gitea.freeleaps.mathmast.com/freeleaps/freeleaps-ops
commit 215f71e0bf
@@ -21,5 +21,9 @@ kube_control_plane
prod-usw2-k8s-freeleaps-worker-nodes-01 ansible_host=10.10.0.16 ansible_user=wwwadmin@mathmast.com host_name=prod-usw2-k8s-freeleaps-worker-nodes-01
prod-usw2-k8s-freeleaps-worker-nodes-02 ansible_host=10.10.0.17 ansible_user=wwwadmin@mathmast.com host_name=prod-usw2-k8s-freeleaps-worker-nodes-02
prod-usw2-k8s-freeleaps-worker-nodes-03 ansible_host=10.10.0.10 ansible_user=wwwadmin@mathmast.com host_name=prod-usw2-k8s-freeleaps-worker-nodes-03
; Added for freeleaps-data-platform, F16s V2 (16c 32G)
prod-usw2-k8s-freeleaps-worker-nodes-04 ansible_host=10.10.0.11 ansible_user=wwwadmin@mathmast.com host_name=prod-usw2-k8s-freeleaps-worker-nodes-04
prod-usw2-k8s-freeleaps-worker-nodes-05 ansible_host=10.10.0.12 ansible_user=wwwadmin@mathmast.com host_name=prod-usw2-k8s-freeleaps-worker-nodes-05
prod-usw2-k8s-freeleaps-worker-nodes-06 ansible_host=10.10.0.13 ansible_user=wwwadmin@mathmast.com host_name=prod-usw2-k8s-freeleaps-worker-nodes-06
; Below are the CI/CD-dedicated nodes for the cluster
prod-usw2-k8s-freeleaps-worker-05 ansible_host=10.10.0.14 ansible_user=wwwadmin@mathmast.com host_name=prod-usw2-k8s-freeleaps-worker-05
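Before running any cluster playbooks against the hosts added above, a quick reachability check is useful. The following is a minimal sketch, assuming the new workers belong to a kube_node-style inventory group (the group memberships are not shown in this hunk):

- hosts: kube_node
  gather_facts: false
  tasks:
    - name: Confirm each new worker node is reachable over SSH
      ansible.builtin.ping: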
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -0,0 +1,26 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

---
apiVersion: v2
name: flink-kubernetes-operator
description: A Helm chart for the Apache Flink Kubernetes Operator
type: application
version: 1.12.1
appVersion: 1.12.1
icon: https://flink.apache.org/img/logo/png/50/color_50.png
@@ -0,0 +1,60 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

# Flink job/cluster related configs
taskmanager.numberOfTaskSlots: 1
parallelism.default: 1

# These parameters are required for Java 17 support.
# Flink 1.18 uses env.java.opts.all; if a user supplies their own version of these opts in their FlinkDeployment, the options below will be overridden.
# env.java.default-opts.all is used for 1.19 onwards, so users can supply their own opts.all in their job deployments and have these appended.
kubernetes.operator.default-configuration.flink-version.v1_18.env.java.opts.all: --add-exports=java.base/sun.net.util=ALL-UNNAMED --add-exports=java.rmi/sun.rmi.registry=ALL-UNNAMED --add-exports=java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.net=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.text=ALL-UNNAMED --add-opens=java.base/java.time=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED --add-opens=java.base/java.util.concurrent.locks=ALL-UNNAMED
kubernetes.operator.default-configuration.flink-version.v1_19+.env.java.default-opts.all: --add-exports=java.base/sun.net.util=ALL-UNNAMED --add-exports=java.rmi/sun.rmi.registry=ALL-UNNAMED --add-exports=java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.net=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.text=ALL-UNNAMED --add-opens=java.base/java.time=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED --add-opens=java.base/java.util.concurrent.locks=ALL-UNNAMED

# Flink operator related configs
# kubernetes.operator.reconcile.interval: 60 s
# kubernetes.operator.reconcile.parallelism: 5
# kubernetes.operator.flink.client.cancel.timeout: 1 min
# kubernetes.operator.resource.cleanup.timeout: 60 s
# kubernetes.operator.observer.rest-ready.delay: 10 s
# kubernetes.operator.observer.progress-check.interval: 10 s
# kubernetes.operator.observer.savepoint.trigger.grace-period: 10 s
# kubernetes.operator.flink.client.timeout: 10 s
# kubernetes.operator.deployment.rollback.enabled: false
# kubernetes.operator.deployment.readiness.timeout: 5min
# kubernetes.operator.user.artifacts.base.dir: /opt/flink/artifacts
# kubernetes.operator.job.upgrade.ignore-pending-savepoint: false
# kubernetes.operator.watched.namespaces: ns1,ns2
# kubernetes.operator.label.selector: flink=enabled
# kubernetes.operator.dynamic.namespaces.enabled: false
# kubernetes.operator.retry.initial.interval: 5 s
# kubernetes.operator.retry.interval.multiplier: 2
# kubernetes.operator.retry.max.attempts: 10
# kubernetes.operator.exception.stacktrace.enabled: false
# kubernetes.operator.exception.stacktrace.max.length: 2048
# kubernetes.operator.exception.field.max.length: 2048
# kubernetes.operator.exception.throwable.list.max.count: 2
# kubernetes.operator.exception.label.mapper: Job has already been submitted:duplicatedJobFound,Server returned HTTP response code:httpResponseCodeFound
# kubernetes.operator.leader-election.enabled: false
# kubernetes.operator.leader-election.lease-name: flink-operator-lease

# kubernetes.operator.snapshot.resource.enabled: true
# kubernetes.operator.savepoint.dispose-on-delete: true

# kubernetes.operator.metrics.reporter.slf4j.factory.class: org.apache.flink.metrics.slf4j.Slf4jReporterFactory
# kubernetes.operator.metrics.reporter.slf4j.interval: 5 MINUTE
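To illustrate the override/append behavior that the Java 17 comments above describe, here is a hedged FlinkDeployment fragment (the resource name and option value are hypothetical, and required fields such as image are omitted):

apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
  name: my-job
spec:
  flinkVersion: v1_19
  flinkConfiguration:
    # On Flink 1.18 this key would replace the operator-provided opts entirely;
    # on 1.19+ the operator default lands in env.java.default-opts.all,
    # so a user-supplied env.java.opts.all is applied in addition to it.
    env.java.opts.all: "-XX:+UseZGC"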
@@ -0,0 +1,65 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

# This affects logging for both user code and Flink
rootLogger.level = INFO
rootLogger.appenderRef.console.ref = ConsoleAppender
rootLogger.appenderRef.rolling.ref = RollingFileAppender

# Uncomment this if you want to _only_ change Flink's logging
#logger.flink.name = org.apache.flink
#logger.flink.level = INFO

# The following lines keep the log level of common libraries/connectors on
# log level INFO. The root logger does not override this. You have to manually
# change the log levels here.
logger.akka.name = akka
logger.akka.level = INFO
logger.kafka.name = org.apache.kafka
logger.kafka.level = INFO
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = INFO
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = INFO

# Log all infos to the console
appender.console.name = ConsoleAppender
appender.console.type = CONSOLE
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n

# Log all infos in the given rolling file
appender.rolling.name = RollingFileAppender
appender.rolling.type = RollingFile
appender.rolling.append = false
appender.rolling.fileName = ${sys:log.file}
appender.rolling.filePattern = ${sys:log.file}.%i
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
appender.rolling.policies.type = Policies
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 100MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.max = 10

# Suppress the irrelevant (wrong) warnings from the Netty channel handler
logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
logger.netty.level = OFF

# The monitor interval in seconds to enable log4j automatic reconfiguration
# monitorInterval = 30
@@ -0,0 +1,37 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

rootLogger.level = INFO
rootLogger.appenderRef.console.ref = ConsoleAppender

# Log all infos to the console
appender.console.name = ConsoleAppender
appender.console.type = CONSOLE
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %style{%d}{yellow} %style{%-30c{1.}}{cyan} %highlight{[%-5level]%notEmpty{[%X{resource.namespace}/}%notEmpty{%X{resource.name}]} %msg%n%throwable}

# Do not log config loading
logger.conf.name = org.apache.flink.configuration.GlobalConfiguration
logger.conf.level = ERROR

# Avoid logging fallback key INFO messages
# (note: log4j2 logger ids must be unique, so a second id is used here;
# reusing logger.conf would silently replace the definition above)
logger.conf2.name = org.apache.flink.configuration.Configuration
logger.conf2.level = ERROR

# The monitor interval in seconds to enable log4j automatic reconfiguration
# monitorInterval = 30
File diff suppressed because it is too large
@@ -0,0 +1,273 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: flinksessionjobs.flink.apache.org
spec:
  group: flink.apache.org
  names:
    kind: FlinkSessionJob
    plural: flinksessionjobs
    shortNames:
    - sessionjob
    singular: flinksessionjob
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - description: Last observed state of the job.
      jsonPath: .status.jobStatus.state
      name: Job Status
      priority: 0
      type: string
    - description: "Lifecycle state of the Flink resource (including being rolled\
        \ back, failed etc.)."
      jsonPath: .status.lifecycleState
      name: Lifecycle State
      priority: 0
      type: string
    name: v1beta1
    schema:
      openAPIV3Schema:
        properties:
          spec:
            properties:
              deploymentName:
                type: string
              flinkConfiguration:
                additionalProperties:
                  type: string
                type: object
              job:
                properties:
                  allowNonRestoredState:
                    type: boolean
                  args:
                    items:
                      type: string
                    type: array
                  autoscalerResetNonce:
                    type: integer
                  checkpointTriggerNonce:
                    type: integer
                  entryClass:
                    type: string
                  initialSavepointPath:
                    type: string
                  jarURI:
                    type: string
                  parallelism:
                    type: integer
                  savepointRedeployNonce:
                    type: integer
                  savepointTriggerNonce:
                    type: integer
                  state:
                    enum:
                    - running
                    - suspended
                    type: string
                  upgradeMode:
                    enum:
                    - last-state
                    - savepoint
                    - stateless
                    type: string
                type: object
              restartNonce:
                type: integer
            type: object
          status:
            properties:
              error:
                type: string
              jobStatus:
                properties:
                  checkpointInfo:
                    properties:
                      formatType:
                        enum:
                        - FULL
                        - INCREMENTAL
                        - UNKNOWN
                        type: string
                      lastCheckpoint:
                        properties:
                          formatType:
                            enum:
                            - FULL
                            - INCREMENTAL
                            - UNKNOWN
                            type: string
                          timeStamp:
                            type: integer
                          triggerNonce:
                            type: integer
                          triggerType:
                            enum:
                            - MANUAL
                            - PERIODIC
                            - UNKNOWN
                            - UPGRADE
                            type: string
                        type: object
                      lastPeriodicCheckpointTimestamp:
                        type: integer
                      triggerId:
                        type: string
                      triggerTimestamp:
                        type: integer
                      triggerType:
                        enum:
                        - MANUAL
                        - PERIODIC
                        - UNKNOWN
                        - UPGRADE
                        type: string
                    type: object
                  jobId:
                    type: string
                  jobName:
                    type: string
                  savepointInfo:
                    properties:
                      formatType:
                        enum:
                        - CANONICAL
                        - NATIVE
                        - UNKNOWN
                        type: string
                      lastPeriodicSavepointTimestamp:
                        type: integer
                      lastSavepoint:
                        properties:
                          formatType:
                            enum:
                            - CANONICAL
                            - NATIVE
                            - UNKNOWN
                            type: string
                          location:
                            type: string
                          timeStamp:
                            type: integer
                          triggerNonce:
                            type: integer
                          triggerType:
                            enum:
                            - MANUAL
                            - PERIODIC
                            - UNKNOWN
                            - UPGRADE
                            type: string
                        type: object
                      savepointHistory:
                        items:
                          properties:
                            formatType:
                              enum:
                              - CANONICAL
                              - NATIVE
                              - UNKNOWN
                              type: string
                            location:
                              type: string
                            timeStamp:
                              type: integer
                            triggerNonce:
                              type: integer
                            triggerType:
                              enum:
                              - MANUAL
                              - PERIODIC
                              - UNKNOWN
                              - UPGRADE
                              type: string
                          type: object
                        type: array
                      triggerId:
                        type: string
                      triggerTimestamp:
                        type: integer
                      triggerType:
                        enum:
                        - MANUAL
                        - PERIODIC
                        - UNKNOWN
                        - UPGRADE
                        type: string
                    type: object
                  startTime:
                    type: string
                  state:
                    enum:
                    - CANCELED
                    - CANCELLING
                    - CREATED
                    - FAILED
                    - FAILING
                    - FINISHED
                    - INITIALIZING
                    - RECONCILING
                    - RESTARTING
                    - RUNNING
                    - SUSPENDED
                    type: string
                  updateTime:
                    type: string
                  upgradeSavepointPath:
                    type: string
                type: object
              lifecycleState:
                enum:
                - CREATED
                - DELETED
                - DELETING
                - DEPLOYED
                - FAILED
                - ROLLED_BACK
                - ROLLING_BACK
                - STABLE
                - SUSPENDED
                - UPGRADING
                type: string
              observedGeneration:
                type: integer
              reconciliationStatus:
                properties:
                  lastReconciledSpec:
                    type: string
                  lastStableSpec:
                    type: string
                  reconciliationTimestamp:
                    type: integer
                  state:
                    enum:
                    - DEPLOYED
                    - ROLLED_BACK
                    - ROLLING_BACK
                    - UPGRADING
                    type: string
                type: object
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
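A minimal FlinkSessionJob conforming to the schema above might look like the following sketch (the deployment name and jar URI are placeholders):

apiVersion: flink.apache.org/v1beta1
kind: FlinkSessionJob
metadata:
  name: example-session-job
spec:
  deploymentName: example-session-cluster
  job:
    jarURI: https://example.com/jars/streaming-job.jar
    parallelism: 2
    state: running
    upgradeMode: stateless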
@@ -0,0 +1,112 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: flinkstatesnapshots.flink.apache.org
spec:
  group: flink.apache.org
  names:
    kind: FlinkStateSnapshot
    plural: flinkstatesnapshots
    shortNames:
    - flinksnp
    singular: flinkstatesnapshot
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - description: Final path of the snapshot.
      jsonPath: .status.path
      name: Path
      priority: 0
      type: string
    - description: Timestamp when the snapshot was last created/failed.
      jsonPath: .status.resultTimestamp
      name: Result Timestamp
      priority: 0
      type: string
    - description: Current state of the snapshot.
      jsonPath: .status.state
      name: Snapshot State
      priority: 0
      type: string
    name: v1beta1
    schema:
      openAPIV3Schema:
        properties:
          spec:
            properties:
              backoffLimit:
                type: integer
              checkpoint:
                type: object
              jobReference:
                properties:
                  kind:
                    enum:
                    - FlinkDeployment
                    - FlinkSessionJob
                    type: string
                  name:
                    type: string
                type: object
              savepoint:
                properties:
                  alreadyExists:
                    type: boolean
                  disposeOnDelete:
                    type: boolean
                  formatType:
                    enum:
                    - CANONICAL
                    - NATIVE
                    - UNKNOWN
                    type: string
                  path:
                    type: string
                type: object
            type: object
          status:
            properties:
              error:
                type: string
              failures:
                type: integer
              path:
                type: string
              resultTimestamp:
                type: string
              state:
                enum:
                - ABANDONED
                - COMPLETED
                - FAILED
                - IN_PROGRESS
                - TRIGGER_PENDING
                type: string
              triggerId:
                type: string
              triggerTimestamp:
                type: string
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
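Using the schema above, a manual savepoint request could be expressed roughly as follows (job name and path are placeholders):

apiVersion: flink.apache.org/v1beta1
kind: FlinkStateSnapshot
metadata:
  name: example-savepoint
spec:
  backoffLimit: 3
  jobReference:
    kind: FlinkDeployment
    name: example-deployment
  savepoint:
    disposeOnDelete: true
    formatType: CANONICAL
    path: s3://bucket/savepoints/example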
@@ -0,0 +1,197 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

{{/*
Expand the name of the chart.
*/}}
{{- define "flink-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flink-operator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flink-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "flink-operator.labels" -}}
{{ include "flink-operator.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "flink-operator.chart" . }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "flink-operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "flink-operator.name" . }}
{{- end }}

{{/*
Create the path of the operator image to use
*/}}
{{- define "flink-operator.imagePath" -}}
{{- if .Values.image.digest }}
{{- .Values.image.repository }}@{{ .Values.image.digest }}
{{- else }}
{{- .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}
{{- end }}
{{- end }}

{{/*
Create the name of the operator role to use
*/}}
{{- define "flink-operator.roleName" -}}
{{- if .Values.rbac.operatorRole.create }}
{{- default (include "flink-operator.fullname" .) .Values.rbac.operatorRole.name }}
{{- else }}
{{- default "default" .Values.rbac.operatorRole.name }}
{{- end }}
{{- end }}

{{/*
Create the name of the operator role binding to use
*/}}
{{- define "flink-operator.roleBindingName" -}}
{{- if .Values.rbac.operatorRoleBinding.create }}
{{- default (include "flink-operator.fullname" .) .Values.rbac.operatorRoleBinding.name }}
{{- else }}
{{- default "default" .Values.rbac.operatorRoleBinding.name }}
{{- end }}
{{- end }}

{{/*
Create the name of the job role to use
*/}}
{{- define "flink-operator.jobRoleName" -}}
{{- if .Values.rbac.jobRoleBinding.create }}
{{- default (include "flink-operator.fullname" .) .Values.rbac.jobRole.name }}
{{- else }}
{{- default "default" .Values.rbac.jobRole.name }}
{{- end }}
{{- end }}

{{/*
Create the name of the job role binding to use
*/}}
{{- define "flink-operator.jobRoleBindingName" -}}
{{- if .Values.rbac.jobRole.create }}
{{- default (include "flink-operator.fullname" .) .Values.rbac.jobRoleBinding.name }}
{{- else }}
{{- default "default" .Values.rbac.jobRoleBinding.name }}
{{- end }}
{{- end }}


{{/*
Create the name of the operator service account to use
*/}}
{{- define "flink-operator.serviceAccountName" -}}
{{- if .Values.operatorServiceAccount.create }}
{{- default (include "flink-operator.fullname" .) .Values.operatorServiceAccount.name }}
{{- else }}
{{- default "default" .Values.operatorServiceAccount.name }}
{{- end }}
{{- end }}

{{/*
Create the name of the job service account to use
*/}}
{{- define "flink-operator.jobServiceAccountName" -}}
{{- if .Values.jobServiceAccount.create }}
{{- default (include "flink-operator.fullname" .) .Values.jobServiceAccount.name }}
{{- else }}
{{- default "default" .Values.jobServiceAccount.name }}
{{- end }}
{{- end }}

{{/*
Determine role scope based on name
*/}}
{{- define "flink-operator.roleScope" -}}
{{- if contains ":" .role }}
{{- printf "ClusterRole" }}
{{- else }}
{{- printf "Role" }}
{{- end }}
{{- end }}

{{- define "flink-operator.validating-webhook-enabled" -}}
{{- if hasKey .Values.webhook "validator" }}
{{- if .Values.webhook.validator.create }}
{{- printf "true" }}
{{- else }}
{{- printf "false" }}
{{- end }}
{{- else }}
{{- if or (.Values.webhook.create) }}
{{- printf "true" }}
{{- else }}
{{- printf "false" }}
{{- end }}
{{- end }}
{{- end }}

{{- define "flink-operator.mutating-webhook-enabled" -}}
{{- if hasKey .Values.webhook "mutator" }}
{{- if .Values.webhook.mutator.create }}
{{- printf "true" }}
{{- else }}
{{- printf "false" }}
{{- end }}
{{- else }}
{{- if or (.Values.webhook.create) }}
{{- printf "true" }}
{{- else }}
{{- printf "false" }}
{{- end }}
{{- end }}
{{- end }}

{{- define "flink-operator.webhook-enabled" -}}
{{- if or (eq (include "flink-operator.validating-webhook-enabled" .) "true") (eq (include "flink-operator.mutating-webhook-enabled" .) "true") }}
{{- printf "true" }}
{{- else }}
{{- printf "false" }}
{{- end }}
{{- end }}
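The two webhook helpers above first look for fine-grained webhook.validator / webhook.mutator blocks and only fall back to the single webhook.create flag when those keys are absent. A hedged values.yaml fragment showing both shapes:

# Coarse toggle (fallback branch): enables both webhooks together.
webhook:
  create: true

# Fine-grained toggle (taken whenever the validator/mutator keys exist):
# webhook:
#   validator:
#     create: true
#   mutator:
#     create: false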
@@ -0,0 +1,328 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "flink-operator.name" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" . | nindent 4 }}
    {{- if index (.Values.operatorPod) "labels" }}
    {{- with .Values.operatorPod.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
    {{- end }}
spec:
  replicas: {{ .Values.replicas }}
  strategy:
    {{- toYaml .Values.strategy | nindent 4 }}
  selector:
    matchLabels:
      {{- include "flink-operator.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "flink-operator.selectorLabels" . | nindent 8 }}
        {{- if index (.Values.operatorPod) "labels" }}
        {{- with .Values.operatorPod.labels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- end }}
      annotations:
        kubectl.kubernetes.io/default-container: {{ .Chart.Name }}
        {{- if index (.Values.operatorPod) "annotations" }}
        {{- with .Values.operatorPod.annotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- end }}
    spec:
      {{- with .Values.operatorPod.priorityClassName }}
      priorityClassName: {{ . }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      {{- if .Values.operatorPod.nodeSelector }}
      nodeSelector: {{ toYaml .Values.operatorPod.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if .Values.operatorPod.affinity }}
      affinity: {{ toYaml .Values.operatorPod.affinity | nindent 8 }}
      {{- end }}
      {{- with .Values.operatorPod.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "flink-operator.serviceAccountName" . }}
      {{- if .Values.operatorPod.topologySpreadConstraints }}
      topologySpreadConstraints: {{ toYaml .Values.operatorPod.topologySpreadConstraints | nindent 8 }}
      {{- end }}
      {{- if .Values.operatorPod.initContainers }}
      initContainers:
        {{- toYaml .Values.operatorPod.initContainers | nindent 8 }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: {{ include "flink-operator.imagePath" . }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command: ["/docker-entrypoint.sh", "operator"]
          ports:
            {{- if .Values.metrics.port }}
            - containerPort: {{ .Values.metrics.port }}
              name: metrics
              protocol: TCP
            {{- end }}
            {{- if index .Values "operatorHealth" }}
            - containerPort: {{ .Values.operatorHealth.port }}
              name: health-port
              protocol: TCP
            {{- end }}
          env:
            - name: OPERATOR_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: {{ include "flink-operator.name" . }}
            - name: FLINK_CONF_DIR
              value: /opt/flink/conf
            - name: FLINK_PLUGINS_DIR
              value: /opt/flink/plugins
            - name: LOG_CONFIG
              value: {{ .Values.jvmArgs.logConfig }}
            - name: JVM_ARGS
              value: {{ .Values.jvmArgs.operator }}
            {{- if .Values.tls.create }}
            - name: OPERATOR_KEYSTORE_PATH
              value: /opt/flink/tls-cert/keystore.jks
            - name: OPERATOR_TRUSTSTORE_PATH
              value: /opt/flink/tls-cert/truststore.jks
            - name: OPERATOR_KEYSTORE_PASSWORD
              valueFrom:
                secretKeyRef:
                  {{- toYaml .Values.tls.secretKeyRef | nindent 18 }}
                  optional: true
            {{- end }}
            {{- with .Values.operatorPod.env }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- with .Values.operatorPod.envFrom }}
          envFrom:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          resources:
            {{- toYaml .Values.operatorPod.resources | nindent 12 }}
          securityContext:
            {{- toYaml .Values.operatorSecurityContext | nindent 12 }}
          volumeMounts:
            - name: flink-operator-config-volume
              mountPath: /opt/flink/conf
            {{- if .Values.operatorVolumeMounts.create }}
            {{- toYaml .Values.operatorVolumeMounts.data | nindent 12 }}
            {{- else }}
            - name: flink-artifacts-volume
              mountPath: /opt/flink/artifacts
            {{- end }}
            {{- if .Values.tls.create }}
            - name: flink-operator-cert-secret
              mountPath: /opt/flink/tls-cert
            {{- end }}
          {{- if and (index .Values "operatorHealth") (index .Values.operatorHealth "livenessProbe") }}
          livenessProbe:
            {{- toYaml .Values.operatorHealth.livenessProbe | nindent 12 }}
            httpGet:
              path: /
              port: health-port
          {{- end }}
          {{- if and (index .Values "operatorHealth") (index .Values.operatorHealth "startupProbe") }}
          startupProbe:
            {{- toYaml .Values.operatorHealth.startupProbe | nindent 12 }}
            httpGet:
              path: /
              port: health-port
          {{- end }}
          {{- if .Values.postStart }}
          lifecycle:
            postStart:
              {{- toYaml .Values.postStart | nindent 14 }}
          {{- end }}
        {{- if eq (include "flink-operator.webhook-enabled" .) "true" }}
        - name: flink-webhook
          image: {{ include "flink-operator.imagePath" . }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command: ["/docker-entrypoint.sh", "webhook"]
          env:
            - name: WEBHOOK_KEYSTORE_PASSWORD
              valueFrom:
                secretKeyRef:
                  {{- if .Values.webhook.keystore.useDefaultPassword }}
                  name: flink-operator-webhook-secret
                  key: password
                  {{- else }}
                  {{- with .Values.webhook.keystore.passwordSecretRef }}
                  {{- toYaml . | nindent 18 }}
                  {{- end }}
                  {{- end }}
            - name: WEBHOOK_KEYSTORE_FILE
              value: "/certs/keystore.p12"
            - name: WEBHOOK_KEYSTORE_TYPE
              value: "pkcs12"
            - name: WEBHOOK_SERVER_PORT
              value: "9443"
            - name: LOG_CONFIG
              value: {{ .Values.jvmArgs.logConfig }}
            - name: JVM_ARGS
              value: {{ .Values.jvmArgs.webhook }}
            - name: FLINK_CONF_DIR
              value: /opt/flink/conf
            - name: FLINK_PLUGINS_DIR
              value: /opt/flink/plugins
            - name: OPERATOR_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            {{- with .Values.operatorPod.webhook.container.env }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          resources:
            {{- toYaml .Values.operatorPod.webhook.resources | nindent 12 }}
          securityContext:
            {{- toYaml .Values.webhookSecurityContext | nindent 12 }}
          volumeMounts:
            - name: keystore
              mountPath: "/certs"
              readOnly: true
            - name: flink-operator-config-volume
              mountPath: /opt/flink/conf
        {{- end }}
        {{- if .Values.operatorPod.sidecarContainers }}
        {{- toYaml .Values.operatorPod.sidecarContainers | nindent 8 }}
        {{- end }}
      {{- if index (.Values.operatorPod) "dnsPolicy" }}
      dnsPolicy: {{ .Values.operatorPod.dnsPolicy | quote }}
      {{- end }}
      {{- if index (.Values.operatorPod) "dnsConfig" }}
      dnsConfig:
        {{- with .Values.operatorPod.dnsConfig }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- end }}
      volumes:
        - name: flink-operator-config-volume
          configMap:
            name: flink-operator-config
            items:
              {{- if hasKey .Values.defaultConfiguration "flink-conf.yaml" }}
              - key: flink-conf.yaml
                path: flink-conf.yaml
              {{- else }}
              - key: config.yaml
                path: config.yaml
              {{- end }}
              - key: log4j-operator.properties
                path: log4j-operator.properties
              - key: log4j-console.properties
                path: log4j-console.properties
        {{- if .Values.operatorVolumes.create }}
        {{- toYaml .Values.operatorVolumes.data | nindent 8 }}
        {{- else }}
        - name: flink-artifacts-volume
          emptyDir: {}
        {{- end }}
        {{- if eq (include "flink-operator.webhook-enabled" .) "true" }}
        - name: keystore
          secret:
            secretName: webhook-server-cert
            items:
              - key: keystore.p12
                path: keystore.p12
        {{- end }}
        {{- if .Values.tls.create }}
        - name: flink-operator-cert-secret
          secret:
            secretName: {{ .Values.tls.secretName }}
            optional: true
        {{- end }}
---
{{- if .Values.defaultConfiguration.create }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-operator-config
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" . | nindent 4 }}
data:
  config.yaml: |+
    {{- if .Values.defaultConfiguration.append }}
    {{- $.Files.Get "conf/flink-conf.yaml" | nindent 4 -}}
    {{- end }}
    {{- if hasKey (.Values.defaultConfiguration) "config.yaml" }}
    {{- index (.Values.defaultConfiguration) "config.yaml" | nindent 4 -}}
    {{- end }}
    {{- if .Values.watchNamespaces }}
    kubernetes.operator.watched.namespaces: {{ join "," .Values.watchNamespaces }}
    {{- end }}
    {{- if index .Values "operatorHealth" }}
    kubernetes.operator.health.probe.enabled: true
    kubernetes.operator.health.probe.port: {{ .Values.operatorHealth.port }}
    {{- end }}
  flink-conf.yaml: |+
    {{- if .Values.defaultConfiguration.append }}
    {{- $.Files.Get "conf/flink-conf.yaml" | nindent 4 -}}
    {{- end }}
    {{- if hasKey (.Values.defaultConfiguration) "flink-conf.yaml" }}
    {{- index (.Values.defaultConfiguration) "flink-conf.yaml" | nindent 4 -}}
    {{- end }}
    {{- if .Values.watchNamespaces }}
    kubernetes.operator.watched.namespaces: {{ join "," .Values.watchNamespaces }}
    {{- end }}
    {{- if index .Values "operatorHealth" }}
    kubernetes.operator.health.probe.enabled: true
    kubernetes.operator.health.probe.port: {{ .Values.operatorHealth.port }}
    {{- end }}
  log4j-operator.properties: |+
    {{- if .Values.defaultConfiguration.append }}
    {{- $.Files.Get "conf/log4j-operator.properties" | nindent 4 -}}
    {{- end }}
    {{- if index (.Values.defaultConfiguration) "log4j-operator.properties" }}
    {{- index (.Values.defaultConfiguration) "log4j-operator.properties" | nindent 4 -}}
    {{- end }}
  log4j-console.properties: |+
    {{- if .Values.defaultConfiguration.append }}
    {{- $.Files.Get "conf/log4j-console.properties" | nindent 4 -}}
    {{- end }}
    {{- if index (.Values.defaultConfiguration) "log4j-console.properties" }}
    {{- index (.Values.defaultConfiguration) "log4j-console.properties" | nindent 4 -}}
    {{- end }}
{{- end }}
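The ConfigMap above renders config.yaml and flink-conf.yaml from the chart's bundled conf/ files plus optional user overrides, and the volume items switch to mounting flink-conf.yaml when that key exists. A hedged values.yaml fragment (the override content is illustrative):

defaultConfiguration:
  create: true
  append: true
  # Supplying this key makes the deployment mount flink-conf.yaml
  # instead of config.yaml (see the hasKey branch in the volume items).
  flink-conf.yaml: |
    kubernetes.operator.reconcile.interval: 60 s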
@@ -0,0 +1,351 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

{{/*
RBAC rules used to create the operator (cluster)role based on the scope
*/}}
{{- define "flink-operator.rbacRules" }}
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - services
      - events
      - configmaps
      - secrets
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
      - deletecollection
  {{- if .Values.rbac.nodesRule.create }}
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
  {{- end }}
  - apiGroups:
      - apps
    resources:
      - deployments
      - deployments/finalizers
      - replicasets
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - apps
    resources:
      - deployments/scale
    verbs:
      - get
      - update
      - patch
  - apiGroups:
      - extensions
    resources:
      - deployments
      - ingresses
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - flink.apache.org
    resources:
      - flinkdeployments
      - flinkdeployments/finalizers
      - flinksessionjobs
      - flinksessionjobs/finalizers
      - flinkstatesnapshots
      - flinkstatesnapshots/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - flink.apache.org
    resources:
      - flinkdeployments/status
      - flinksessionjobs/status
      - flinkstatesnapshots/status
    verbs:
      - get
      - update
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
{{- end }}

{{/*
RBAC rules used to create the job (cluster)role based on the scope
*/}}
{{- define "flink-operator.jobRbacRules" }}
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - configmaps
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - apps
    resources:
      - deployments
      - deployments/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
{{- end }}

---
{{- if .Values.rbac.create }}
---
{{/*
Namespace-scoped RBAC.
*/}}
{{- if .Values.watchNamespaces }}
{{- range .Values.watchNamespaces }}
{{- if $.Values.rbac.operatorRole.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "flink-operator.roleName" $ }}
  namespace: {{ . }}
  labels:
    {{- include "flink-operator.labels" $ | nindent 4 }}
{{- template "flink-operator.rbacRules" $ }}
{{- end }}
---
{{- if $.Values.rbac.jobRole.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "flink-operator.jobRoleName" $ }}
  namespace: {{ . }}
  labels:
    {{- include "flink-operator.labels" $ | nindent 4 }}
  annotations:
    "helm.sh/resource-policy": keep
{{- template "flink-operator.jobRbacRules" $ }}
{{- end }}
---
{{- if $.Values.rbac.operatorRoleBinding.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "flink-operator.roleBindingName" $ }}
  namespace: {{ . }}
  labels:
    {{- include "flink-operator.labels" $ | nindent 4 }}
roleRef:
  kind: {{ $role := include "flink-operator.roleName" $ }}{{ include "flink-operator.roleScope" (dict "role" $role)}}
  name: {{ include "flink-operator.roleName" $ }}
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "flink-operator.serviceAccountName" $ }}
    namespace: {{ $.Release.Namespace }}
{{- end }}
---
{{- if $.Values.rbac.jobRoleBinding.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "flink-operator.jobRoleBindingName" $ }}
  namespace: {{ . }}
  labels:
    {{- include "flink-operator.labels" $ | nindent 4 }}
  annotations:
    "helm.sh/resource-policy": keep
roleRef:
  kind: {{ $role := include "flink-operator.jobRoleName" $ }}{{ include "flink-operator.roleScope" (dict "role" $role)}}
  name: {{ include "flink-operator.jobRoleName" $ }}
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "flink-operator.jobServiceAccountName" $ }}
    namespace: {{ . }}
{{- end }}
---
{{- end }}
{{/*
Give operator the ability to operate on leases in the release namespace
*/}}
{{- if and .Values.rbac.operatorRole.create (not (has .Release.Namespace .Values.watchNamespaces)) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "flink-operator.roleName" $ }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" . | nindent 4 }}
rules:
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
{{- end }}
---
{{- if and .Values.rbac.operatorRole.create (not (has .Release.Namespace .Values.watchNamespaces)) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "flink-operator.roleBindingName" $ }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" $ | nindent 4 }}
roleRef:
  kind: Role
  name: {{ include "flink-operator.roleName" $ }}
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "flink-operator.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}
{{ else }}
{{/*
Cluster-scoped RBAC.
*/}}
---
{{- if .Values.rbac.operatorRole.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "flink-operator.roleName" $ }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" . | nindent 4 }}
{{- template "flink-operator.rbacRules" $ }}
{{- end }}
---
{{- if .Values.rbac.jobRole.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "flink-operator.jobRoleName" $ }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" . | nindent 4 }}
  annotations:
    "helm.sh/resource-policy": keep
{{- template "flink-operator.jobRbacRules" $ }}
{{- end }}
---
{{- if .Values.rbac.operatorRoleBinding.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "flink-operator.roleBindingName" $ }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" . | nindent 4 }}
roleRef:
  kind: ClusterRole
  name: {{ include "flink-operator.roleName" $ }}
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "flink-operator.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}
---
{{- if .Values.rbac.jobRoleBinding.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "flink-operator.jobRoleBindingName" $ }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" . | nindent 4 }}
  annotations:
    "helm.sh/resource-policy": keep
roleRef:
  kind: Role
  name: {{ include "flink-operator.jobRoleName" $ }}
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ include "flink-operator.jobServiceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}
{{- end }}
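Whether the template above emits per-namespace Roles or cluster-wide ClusterRoles hinges on watchNamespaces. A hedged values.yaml fragment (namespace names are placeholders):

rbac:
  create: true
  operatorRole:
    create: true
  operatorRoleBinding:
    create: true
  jobRole:
    create: true
  jobRoleBinding:
    create: true
# Listing namespaces switches RBAC to namespaced Roles/RoleBindings per entry;
# leaving this unset falls through to the ClusterRole branch.
watchNamespaces:
  - team-a
  - team-b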
@@ -0,0 +1,70 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

---
{{- if .Values.operatorServiceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "flink-operator.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" . | nindent 4 }}
  {{- with .Values.operatorServiceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
---
{{- if .Values.jobServiceAccount.create -}}
{{/*
Create job service accounts for all watched namespaces.
*/}}
{{- if .Values.watchNamespaces }}
{{- range .Values.watchNamespaces }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "flink-operator.jobServiceAccountName" $ }}
  namespace: {{ . }}
  labels:
    {{- include "flink-operator.labels" $ | nindent 4 }}
  {{- with $.Values.jobServiceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
---
{{- end }}
{{/*
Create the job service account for the operator namespace; add it for other
namespaces manually (or by listing them in watchNamespaces).
*/}}
{{- else}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "flink-operator.jobServiceAccountName" $ }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "flink-operator.labels" $ | nindent 4 }}
  {{- with .Values.jobServiceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
{{- end }}
@ -0,0 +1,157 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
---
{{- if eq (include "flink-operator.webhook-enabled" .) "true" }}
---
apiVersion: v1
kind: Service
metadata:
  name: flink-operator-webhook-service
  namespace: {{ .Release.Namespace }}
  {{- if .Values.webhook.serviceLabels }}
  labels:
    {{- range $key, $value := .Values.webhook.serviceLabels }}
    {{ $key }}: {{ $value }}
    {{- end }}
  {{- end }}
spec:
  ports:
    - port: 443
      targetPort: 9443
  selector:
    app.kubernetes.io/name: {{ include "flink-operator.name" . }}
---
{{- if .Values.webhook.keystore.useDefaultPassword }}
apiVersion: v1
kind: Secret
metadata:
  name: flink-operator-webhook-secret
  namespace: {{ .Release.Namespace }}
type: Opaque
data:
  password: cGFzc3dvcmQxMjM0
{{- end }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: flink-operator-serving-cert
  namespace: {{ .Release.Namespace }}
spec:
  dnsNames:
    - flink-operator-webhook-service.{{ .Release.Namespace }}.svc
    - flink-operator-webhook-service.{{ .Release.Namespace }}.svc.cluster.local
  keystores:
    pkcs12:
      create: true
      passwordSecretRef:
        {{- if .Values.webhook.keystore.useDefaultPassword }}
        name: flink-operator-webhook-secret
        key: password
        {{- else }}
        {{- with .Values.webhook.keystore.passwordSecretRef }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- end }}
  issuerRef:
    kind: Issuer
    name: flink-operator-selfsigned-issuer
  commonName: FlinkDeployment Validator
  secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: flink-operator-selfsigned-issuer
  namespace: {{ .Release.Namespace }}
spec:
  selfSigned: {}
{{- end }}
{{- if eq (include "flink-operator.validating-webhook-enabled" .) "true" }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  annotations:
    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/flink-operator-serving-cert
  name: flink-operator-{{ .Release.Namespace }}-webhook-configuration
webhooks:
  - name: validationwebhook.flink.apache.org
    admissionReviewVersions: ["v1"]
    clientConfig:
      service:
        name: flink-operator-webhook-service
        namespace: {{ .Release.Namespace }}
        path: /validate
    failurePolicy: Fail
    rules:
      - apiGroups: ["flink.apache.org"]
        apiVersions: ["*"]
        scope: "Namespaced"
        operations:
          - CREATE
          - UPDATE
        resources:
          - flinkdeployments
          - flinksessionjobs
          - flinkstatesnapshots
    sideEffects: None
    {{- if .Values.watchNamespaces }}
    namespaceSelector:
      matchExpressions:
        - key: kubernetes.io/metadata.name
          operator: In
          values: [{{- range .Values.watchNamespaces }}{{ . | quote }},{{- end}}]
    {{- end }}
{{- end }}
{{- if eq (include "flink-operator.mutating-webhook-enabled" .) "true" }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  annotations:
    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/flink-operator-serving-cert
  name: flink-operator-{{ .Release.Namespace }}-webhook-configuration
webhooks:
  - name: mutationwebhook.flink.apache.org
    admissionReviewVersions: ["v1"]
    clientConfig:
      service:
        name: flink-operator-webhook-service
        namespace: {{ .Release.Namespace }}
        path: /mutate
    failurePolicy: Fail
    rules:
      - apiGroups: ["flink.apache.org"]
        apiVersions: ["*"]
        scope: "Namespaced"
        operations:
          - CREATE
          - UPDATE
        resources:
          - flinksessionjobs
          - flinkdeployments
    sideEffects: None
    {{- if .Values.watchNamespaces }}
    namespaceSelector:
      matchExpressions:
        - key: kubernetes.io/metadata.name
          operator: In
          values: [{{- range .Values.watchNamespaces }}{{ . | quote }},{{- end}}]
    {{- end }}
{{- end }}
@ -0,0 +1,219 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

---

# List of kubernetes namespaces to watch for FlinkDeployment changes, empty means all namespaces.
# When enabled RBAC is only created for said namespaces, otherwise it is done for the cluster scope.
# watchNamespaces: ["flink"]

image:
  repository: ghcr.io/apache/flink-kubernetes-operator
  pullPolicy: IfNotPresent
  tag: "0315e91"
  # If image digest is set then it takes precedence and the image tag will be ignored
  # digest: ""

imagePullSecrets: []

# Replicas must be 1 unless operator leader election is configured
replicas: 1

# Strategy type must be Recreate unless leader election is configured
strategy:
  type: Recreate

rbac:
  create: true
  # kubernetes.rest-service.exposed.type: NodePort requires
  # list permission for nodes at the cluster scope.
  # Set create to true if you are using NodePort type.
  nodesRule:
    create: false
  operatorRole:
    create: true
    name: "flink-operator"
  operatorRoleBinding:
    create: true
    name: "flink-operator-role-binding"
  jobRole:
    create: true
    name: "flink"
  jobRoleBinding:
    create: true
    name: "flink-role-binding"

operatorPod:
  priorityClassName: null
  annotations: {}
  labels: {}
  # The env variables only apply to the operator container in the operator pod
  # TODO: consider making this pod level env variables
  env:
  # - name: ""
  #   value: ""
  # - name: ""
  #   valueFrom:
  #     configMapKeyRef:
  #       name: ""
  #       key: ""
  # dnsPolicy: ""
  # dnsConfig: {}
  # Node labels and affinity for operator pod assignment
  # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
  envFrom:
  # - configMapRef:
  #     name: ""
  nodeSelector: {}

  affinity: {}
  # Node tolerations for operator pod assignment
  # https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
  tolerations: []
  # Topology spread constraints
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: []
  resources: {}
  # resources:
  #   limits:
  #     cpu: "250m"
  #     memory: "512Mi"
  #   requests:
  #     cpu: "250m"
  #     memory: "512Mi"
  webhook:
    resources: {}
  container:
    env:
    # - name: ""
    #   value: ""
  # optional init containers for operator pod
  initContainers: []

  # optional extra containers for operator pod
  sidecarContainers: []

operatorServiceAccount:
  create: true
  annotations: {}
  name: "flink-operator"

jobServiceAccount:
  create: true
  annotations:
    "helm.sh/resource-policy": keep
  name: "flink"

operatorVolumeMounts:
  create: false
  data:
    - name: flink-artifacts
      mountPath: /opt/flink/artifacts

operatorVolumes:
  create: false
  data:
    - name: flink-artifacts
      hostPath:
        path: /tmp/flink/artifacts
        type: DirectoryOrCreate
    # - name: flink-artifacts
    #   persistentVolumeClaim:
    #     claimName: flink-artifacts

podSecurityContext:
  runAsUser: 9999
  runAsGroup: 9999
  # fsGroup: 9999

operatorSecurityContext: {}

webhookSecurityContext: {}

webhook:
  create: true
  # validator:
  #   create: true
  # mutator:
  #   create: true
  keystore:
    useDefaultPassword: true
    # passwordSecretRef:
    #   name: jks-password-secret
    #   key: password-key
  serviceLabels: {}

defaultConfiguration:
  # If set to true, creates ConfigMaps/VolumeMounts. If set to false, no configuration will be created.
  # All below fields will be ignored if create is set to false.
  create: true
  # If set to true,
  # (1) loads the built-in default configuration
  # (2) appends the below flink-conf and logging configuration overrides
  # If set to false, loads just the overrides as in (2).
  # This option has no effect if create is set to false.
  append: true
  flink-conf.yaml: |+
    # Flink Config Overrides
    kubernetes.operator.metrics.reporter.slf4j.factory.class: org.apache.flink.metrics.slf4j.Slf4jReporterFactory
    kubernetes.operator.metrics.reporter.slf4j.interval: 5 MINUTE

    kubernetes.operator.reconcile.interval: 15 s
    kubernetes.operator.observer.progress-check.interval: 5 s
  log4j-operator.properties: |+
    # Flink Operator Logging Overrides
    # rootLogger.level = DEBUG
    # logger.operator.name = org.apache.flink.kubernetes.operator
    # logger.operator.level = DEBUG
  log4j-console.properties: |+
    # Flink Deployment Logging Overrides
    # rootLogger.level = DEBUG

# (Optional) Exposes metrics port on the container if defined
metrics:
  port:

nameOverride: ""
fullnameOverride: ""

# Set the JVM startup options for webhook and operator
jvmArgs:
  webhook: ""
  operator: ""
  logConfig: "-Dlog4j.configurationFile=/opt/flink/conf/log4j-operator.properties"

# Configure health probes for the operator
operatorHealth:
  port: 8085
  livenessProbe:
    periodSeconds: 10
    initialDelaySeconds: 30
  startupProbe:
    failureThreshold: 30
    periodSeconds: 10

# Set postStart hook of the main container
postStart: {}

# Configuration for TLS
tls:
  create: false
  secretName: flink-operator-cert
  secretKeyRef:
    name: operator-certificate-password
    key: password
@ -1,235 +0,0 @@
# Flink High Availability Cluster Deployment

## Overview
This project uses the Apache Flink Kubernetes Operator to deploy a high-availability Flink cluster with persistent storage and automatic failover capabilities.

## Component Architecture
- **JobManager**: 2 replicas with high availability configuration
- **TaskManager**: 3 replicas for distributed processing
- **High Availability**: Kubernetes-based HA with persistent storage
- **Checkpointing**: Persistent checkpoints and savepoints storage

## File Description

### 1. flink-operator-v2.yaml
Flink Kubernetes Operator deployment configuration:
- Operator deployment in `flink-system` namespace
- RBAC configuration for cluster-wide permissions
- Health checks and resource limits
- Enhanced CRD definitions with additional printer columns

### 2. flink-crd.yaml
Custom Resource Definitions for Flink:
- FlinkDeployment CRD
- FlinkSessionJob CRD
- Required for the Flink Operator to function

### 3. ha-flink-cluster-v2.yaml
Production-ready HA Flink cluster configuration:
- 2 JobManager replicas with HA enabled
- 3 TaskManager replicas with anti-affinity rules
- Persistent storage for HA data, checkpoints, and savepoints
- Memory and CPU resource allocation
- Exponential delay restart strategy
- Proper volume mounts and storage configuration

### 4. simple-ha-flink-cluster.yaml
Simplified HA Flink cluster configuration:
- Uses ephemeral storage to avoid PVC binding issues
- Basic HA configuration for testing and development
- Minimal resource requirements
- Recommended for development and testing

### 5. flink-storage.yaml
Storage and RBAC configuration:
- PersistentVolumeClaims for HA data, checkpoints, and savepoints
- ServiceAccount and RBAC permissions for the Flink cluster
- Azure Disk storage class configuration with correct access modes

### 6. flink-rbac.yaml
Enhanced RBAC configuration:
- Complete permissions for Flink HA functionality
- Both namespace-level and cluster-level permissions
- Includes watch permissions for HA operations

## Deployment Steps

### 1. Install Flink Operator
```bash
# Apply Flink Operator configuration
kubectl apply -f flink-operator-v2.yaml

# Verify operator installation
kubectl get pods -n flink-system
```

### 2. Create Storage Resources (Optional - for production)
```bash
# Apply storage configuration
kubectl apply -f flink-storage.yaml

# Verify PVC creation
kubectl get pvc -n freeleaps-data-platform
```

### 3. Deploy HA Flink Cluster
```bash
# Option A: Deploy with persistent storage (production)
kubectl apply -f ha-flink-cluster-v2.yaml

# Option B: Deploy with ephemeral storage (development/testing)
kubectl apply -f simple-ha-flink-cluster.yaml

# Check deployment status
kubectl get flinkdeployments -n freeleaps-data-platform
kubectl get pods -n freeleaps-data-platform -l app=flink
```

## High Availability Features
- **JobManager HA**: 2 JobManager replicas with Kubernetes-based leader election
- **Persistent State**: Checkpoints and savepoints stored on persistent volumes
- **Automatic Failover**: Exponential delay restart strategy with backoff
- **Pod Anti-affinity**: Ensures components are distributed across different nodes
- **Storage Persistence**: HA data, checkpoints, and savepoints persist across restarts
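These features map to a handful of `flinkConfiguration` keys. A minimal sketch, using only settings that appear in `ha-flink-cluster-v2.yaml` (the ephemeral variant swaps the paths for `/tmp/flink/...`):

```yaml
flinkConfiguration:
  # Kubernetes-native leader election and HA metadata location
  high-availability.type: kubernetes
  high-availability.storageDir: file:///opt/flink/ha-data
  # Periodic checkpoints so failed jobs restart from recent state
  execution.checkpointing.interval: 60s
  # Back off exponentially between restart attempts
  restart-strategy.type: exponential-delay
```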
## Network Configuration
- **JobManager**: Port 8081 (Web UI), 6123 (RPC), 6124 (Blob Server)
- **TaskManager**: Port 6121 (Data), 6122 (RPC), 6126 (Metrics)
- **Service Type**: ClusterIP for internal communication
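Because the services are ClusterIP, the Web UI is not directly reachable from outside the cluster. For ad-hoc access, a port-forward works; the service name below is an assumption, so confirm it first with `kubectl get svc -n freeleaps-data-platform`:

```bash
# Forward the JobManager Web UI (port 8081) to localhost
# NOTE: the service name is assumed; check `kubectl get svc` for the real one
kubectl port-forward svc/ha-flink-cluster-v2-rest 8081:8081 -n freeleaps-data-platform
```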
## Storage Configuration
- **HA Data**: 10Gi for high availability metadata
- **Checkpoints**: 20Gi for application checkpoints
- **Savepoints**: 20Gi for manual savepoints
- **Storage Class**: azure-disk-std-ssd-lrs
- **Access Mode**: ReadWriteOnce (Azure Disk limitation)
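Before deploying the persistent variant, it is worth confirming that the storage class referenced by the PVCs actually exists in the cluster:

```bash
# The PVCs in flink-storage.yaml bind only if this storage class is present
kubectl get storageclass azure-disk-std-ssd-lrs
```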
## Monitoring and Operations
- **Health Checks**: Built-in readiness and liveness probes
- **Web UI**: Accessible through JobManager service
- **Metrics**: Exposed on port 8080 for Prometheus collection
- **Logging**: Centralized logging through Kubernetes

## Configuration Details

### High Availability Settings
- **Type**: kubernetes (native Kubernetes HA)
- **Storage**: Persistent volume for HA metadata
- **Cluster ID**: ha-flink-cluster-v2

### Checkpointing Configuration
- **Interval**: 60 seconds
- **Timeout**: 10 minutes
- **Min Pause**: 5 seconds
- **Backend**: Filesystem with persistent storage
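To confirm checkpoints are actually being written, list the checkpoint directory inside a JobManager pod (the pod name is a placeholder; the path matches the persistent-storage variant):

```bash
# Checkpoint subdirectories should appear here once the 60s interval has elapsed
kubectl exec -n freeleaps-data-platform <jobmanager-pod-name> -- ls /opt/flink/checkpoints
```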
### Resource Allocation
- **JobManager**: 0.5 CPU, 1024MB memory (HA), 1.0 CPU, 1024MB memory (Simple)
- **TaskManager**: 0.5 CPU, 2048MB memory (HA), 2.0 CPU, 2048MB memory (Simple)

## Troubleshooting

### Common Issues and Solutions

#### 1. PVC Binding Issues
```bash
# Check PVC status
kubectl get pvc -n freeleaps-data-platform

# PVC stuck in Pending state - usually due to:
# - Insufficient storage quota
# - Wrong access mode (ReadWriteMany not supported by Azure Disk)
# - Storage class not available

# Solution: Use ReadWriteOnce access mode or ephemeral storage
```

#### 2. Pod CrashLoopBackOff
```bash
# Check pod status
kubectl get pods -n freeleaps-data-platform -l app=flink

# Check pod logs
kubectl logs <pod-name> -n freeleaps-data-platform

# Check pod events
kubectl describe pod <pod-name> -n freeleaps-data-platform
```

#### 3. ServiceAccount Issues
```bash
# Verify ServiceAccount exists
kubectl get serviceaccount -n freeleaps-data-platform

# Check RBAC permissions
kubectl get rolebinding -n freeleaps-data-platform
```

#### 4. Storage Path Issues
```bash
# Ensure storage paths match volume mounts
# For persistent storage: /opt/flink/ha-data, /opt/flink/checkpoints
# For ephemeral storage: /tmp/flink/ha-data, /tmp/flink/checkpoints
```

### Diagnostic Commands
```bash
# Check Flink Operator logs
kubectl logs -n flink-system -l app.kubernetes.io/name=flink-kubernetes-operator

# Check Flink cluster status
kubectl describe flinkdeployment <cluster-name> -n freeleaps-data-platform

# Check pod events
kubectl get events -n freeleaps-data-platform --sort-by='.lastTimestamp'

# Check storage status
kubectl get pvc -n freeleaps-data-platform
kubectl describe pvc <pvc-name> -n freeleaps-data-platform

# Check operator status
kubectl get pods -n flink-system
kubectl logs -n flink-system deployment/flink-kubernetes-operator
```

## Important Notes
1. **Storage Limitations**: Azure Disk storage class only supports ReadWriteOnce access mode
2. **ServiceAccount**: Ensure the correct ServiceAccount is specified in the cluster configuration
3. **Resource Requirements**: Verify the cluster has enough CPU/memory for all replicas (a quick check is sketched below)
4. **Network Policies**: May need adjustment for inter-pod communication
5. **Ephemeral vs Persistent**: Use ephemeral storage for development/testing, persistent for production
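For note 3, a rough capacity check before scheduling 2 JobManagers and 3 TaskManagers (assumes the metrics-server addon is installed; otherwise `kubectl describe nodes` shows allocatable resources):

```bash
# Per-node CPU/memory usage; compare against the allocations listed above
kubectl top nodes
```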
## Quick Start (Recommended for Testing)
```bash
# 1. Deploy operator
kubectl apply -f flink-operator-v2.yaml

# 2. Wait for operator to be ready
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=flink-kubernetes-operator -n flink-system

# 3. Deploy simple HA cluster (no persistent storage)
kubectl apply -f simple-ha-flink-cluster.yaml

# 4. Monitor deployment
kubectl get flinkdeployments -n freeleaps-data-platform
kubectl get pods -n freeleaps-data-platform -l app=flink
```

## Production Deployment
```bash
# 1. Deploy operator
kubectl apply -f flink-operator-v2.yaml

# 2. Deploy storage resources
kubectl apply -f flink-storage.yaml

# 3. Deploy production HA cluster
kubectl apply -f ha-flink-cluster-v2.yaml

# 4. Monitor deployment
kubectl get flinkdeployments -n freeleaps-data-platform
kubectl get pods -n freeleaps-data-platform -l app=flink
```
@ -1,43 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: flinkdeployments.flink.apache.org
spec:
  group: flink.apache.org
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
      subresources:
        status: {}
  scope: Namespaced
  names:
    plural: flinkdeployments
    singular: flinkdeployment
    kind: FlinkDeployment
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: flinksessionjobs.flink.apache.org
spec:
  group: flink.apache.org
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
      subresources:
        status: {}
  scope: Namespaced
  names:
    plural: flinksessionjobs
    singular: flinksessionjob
    kind: FlinkSessionJob
@ -1,298 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: flinkdeployments.flink.apache.org
spec:
  group: flink.apache.org
  names:
    kind: FlinkDeployment
    listKind: FlinkDeploymentList
    plural: flinkdeployments
    singular: flinkdeployment
    shortNames:
      - fd
      - flinkdeploy
  scope: Namespaced
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Job Status
          type: string
          jsonPath: .status.jobStatus
        - name: Flink Version
          type: string
          jsonPath: .spec.flinkVersion
        - name: Age
          type: date
          jsonPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: flinksessionjobs.flink.apache.org
spec:
  group: flink.apache.org
  names:
    kind: FlinkSessionJob
    listKind: FlinkSessionJobList
    plural: flinksessionjobs
    singular: flinksessionjob
    shortNames:
      - fsj
      - flinksessionjob
  scope: Namespaced
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Job Status
          type: string
          jsonPath: .status.jobStatus
        - name: Flink Deployment
          type: string
          jsonPath: .spec.deploymentName
        - name: Age
          type: date
          jsonPath: .metadata.creationTimestamp
---
apiVersion: v1
kind: Namespace
metadata:
  name: flink-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flink-kubernetes-operator
  namespace: flink-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: flink-kubernetes-operator
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
      - delete
      - deletecollection
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - get
      - patch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - persistentvolumeclaims
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - create
      - delete
      - deletecollection
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - serviceaccounts
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - create
      - delete
      - deletecollection
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - apps
    resources:
      - deployments
    verbs:
      - create
      - delete
      - deletecollection
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - batch
    resources:
      - jobs
      - cronjobs
    verbs:
      - create
      - delete
      - deletecollection
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - flink.apache.org
    resources:
      - flinkdeployments
      - flinkdeployments/status
      - flinksessionjobs
      - flinksessionjobs/status
    verbs:
      - create
      - delete
      - deletecollection
      - get
      - list
      - patch
      - update
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flink-kubernetes-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flink-kubernetes-operator
subjects:
  - kind: ServiceAccount
    name: flink-kubernetes-operator
    namespace: flink-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flink-kubernetes-operator
  namespace: flink-system
  labels:
    app: flink-kubernetes-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      app: flink-kubernetes-operator
  template:
    metadata:
      labels:
        app: flink-kubernetes-operator
    spec:
      serviceAccountName: flink-kubernetes-operator
      containers:
        - name: flink-kubernetes-operator
          image: apache/flink-kubernetes-operator:1.8.0
          command: ["/docker-entrypoint.sh"]
          args: ["operator"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: flink-kubernetes-operator
            - name: LEADER_ELECTION_ID
              value: flink-kubernetes-operator
            - name: LEADER_ELECTION_NAMESPACE
              value: flink-system
          ports:
            - containerPort: 8085
              name: metrics
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 500m
              memory: 512Mi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8085
            initialDelaySeconds: 15
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /readyz
              port: 8085
@ -1,66 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flink
  namespace: freeleaps-data-platform
  labels:
    app: flink
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: flink-role
  namespace: freeleaps-data-platform
rules:
  - apiGroups: [""]
    resources: ["configmaps", "secrets", "services", "pods", "events", "endpoints"]
    verbs: ["get", "list", "create", "update", "patch", "delete", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets"]
    verbs: ["get", "list", "create", "update", "patch", "delete", "watch"]
  - apiGroups: ["batch"]
    resources: ["jobs", "cronjobs"]
    verbs: ["get", "list", "create", "update", "patch", "delete", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: flink-role-binding
  namespace: freeleaps-data-platform
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: flink-role
subjects:
  - kind: ServiceAccount
    name: flink
    namespace: freeleaps-data-platform
---
# Additional permissions for HA functionality
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: flink-ha-cluster-role
rules:
  - apiGroups: [""]
    resources: ["configmaps", "secrets", "services", "pods", "events", "endpoints"]
    verbs: ["get", "list", "create", "update", "patch", "delete", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets"]
    verbs: ["get", "list", "create", "update", "patch", "delete", "watch"]
  - apiGroups: ["batch"]
    resources: ["jobs", "cronjobs"]
    verbs: ["get", "list", "create", "update", "patch", "delete", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flink-ha-cluster-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flink-ha-cluster-role
subjects:
  - kind: ServiceAccount
    name: flink
    namespace: freeleaps-data-platform
@ -1,82 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ha-flink-ha-data
  namespace: freeleaps-data-platform
  labels:
    app: flink
    component: ha-storage
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: azure-disk-std-ssd-lrs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ha-flink-checkpoints
  namespace: freeleaps-data-platform
  labels:
    app: flink
    component: checkpoint-storage
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
  storageClassName: azure-disk-std-ssd-lrs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ha-flink-savepoints
  namespace: freeleaps-data-platform
  labels:
    app: flink
    component: savepoint-storage
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
  storageClassName: azure-disk-std-ssd-lrs
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flink
  namespace: freeleaps-data-platform
  labels:
    app: flink
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: flink-role
  namespace: freeleaps-data-platform
rules:
  - apiGroups: [""]
    resources: ["configmaps", "secrets", "services", "pods"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: flink-role-binding
  namespace: freeleaps-data-platform
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: flink-role
subjects:
  - kind: ServiceAccount
    name: flink
    namespace: freeleaps-data-platform
@ -1,94 +0,0 @@
apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
  name: ha-flink-cluster-v2
  namespace: freeleaps-data-platform
  labels:
    app: flink
    component: streaming
    cluster-type: ha
spec:
  flinkVersion: v1_19
  image: flink:1.19.0
  flinkConfiguration:
    # High Availability Configuration
    high-availability.type: kubernetes
    high-availability.storageDir: file:///opt/flink/ha-data
    # Checkpointing Configuration
    state.backend.type: filesystem
    state.checkpoints.dir: file:///opt/flink/checkpoints
    state.savepoints.dir: file:///opt/flink/savepoints
    execution.checkpointing.interval: 60s
    execution.checkpointing.min-pause: 5s
    execution.checkpointing.timeout: 10min
    # JobManager Configuration
    jobmanager.rpc.address: ha-flink-cluster-v2-jobmanager
    jobmanager.rpc.port: "6123"
    jobmanager.bind-host: "0.0.0.0"
    # REST Configuration
    rest.address: ha-flink-cluster-v2-jobmanager
    rest.port: "8081"
    rest.bind-address: "0.0.0.0"
    # Blob Server Configuration
    blob.server.port: "6124"
    # TaskManager Configuration
    taskmanager.numberOfTaskSlots: "2"
    # Memory Configuration
    taskmanager.memory.process.size: 2048m
    jobmanager.memory.process.size: 1024m
    # Restart Strategy
    restart-strategy.type: exponential-delay
    restart-strategy.exponential-delay.initial-backoff: 10s
    restart-strategy.exponential-delay.max-backoff: 2min
    restart-strategy.exponential-delay.backoff-multiplier: "2.0"
    restart-strategy.exponential-delay.reset-backoff-threshold: 10min
    restart-strategy.exponential-delay.jitter-factor: "0.1"
  serviceAccount: flink
  jobManager:
    replicas: 2
    resource:
      memory: "1024m"
      cpu: 0.5
    podTemplate:
      spec:
        containers:
          - name: flink-main-container
            volumeMounts:
              - name: ha-data
                mountPath: /opt/flink/ha-data
              - name: checkpoints
                mountPath: /opt/flink/checkpoints
              - name: savepoints
                mountPath: /opt/flink/savepoints
        volumes:
          - name: ha-data
            persistentVolumeClaim:
              claimName: ha-flink-ha-data
          - name: checkpoints
            persistentVolumeClaim:
              claimName: ha-flink-checkpoints
          - name: savepoints
            persistentVolumeClaim:
              claimName: ha-flink-savepoints
  taskManager:
    replicas: 3
    resource:
      memory: "2048m"
      cpu: 0.5
    podTemplate:
      spec:
        containers:
          - name: flink-main-container
            volumeMounts:
              - name: checkpoints
                mountPath: /opt/flink/checkpoints
              - name: savepoints
                mountPath: /opt/flink/savepoints
        volumes:
          - name: checkpoints
            persistentVolumeClaim:
              claimName: ha-flink-checkpoints
          - name: savepoints
            persistentVolumeClaim:
              claimName: ha-flink-savepoints
@ -1,46 +0,0 @@
apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
  name: simple-ha-flink-cluster
  namespace: freeleaps-data-platform
  labels:
    app: flink
    component: streaming
    cluster-type: simple-ha
spec:
  flinkVersion: v1_18
  image: flink:1.18
  flinkConfiguration:
    # Basic Configuration
    taskmanager.numberOfTaskSlots: "2"
    # High Availability Configuration (using ephemeral storage)
    high-availability.type: kubernetes
    high-availability.storageDir: file:///tmp/flink/ha-data
    # Checkpointing Configuration (using ephemeral storage)
    state.backend.type: filesystem
    state.checkpoints.dir: file:///tmp/flink/checkpoints
    state.savepoints.dir: file:///tmp/flink/savepoints
    execution.checkpointing.interval: 60s
    execution.checkpointing.min-pause: 5s
    execution.checkpointing.timeout: 10min
    # Memory Configuration
    taskmanager.memory.process.size: 2048m
    jobmanager.memory.process.size: 1024m
    # Restart Strategy
    restart-strategy.type: exponential-delay
    restart-strategy.exponential-delay.initial-backoff: 10s
    restart-strategy.exponential-delay.max-backoff: 2min
    restart-strategy.exponential-delay.backoff-multiplier: "2.0"
    restart-strategy.exponential-delay.reset-backoff-threshold: 10min
    restart-strategy.exponential-delay.jitter-factor: "0.1"
  serviceAccount: flink
  jobManager:
    replicas: 2
    resource:
      memory: "1024m"
      cpu: 1.0
  taskManager:
    replicas: 3
    resource:
      memory: "2048m"
      cpu: 2.0
@ -1,897 +0,0 @@
# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0

## @section Global parameters
## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##

## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  defaultStorageClass: ""
  storageClass: "azure-disk-std-lrs"
  ## Security parameters
  ##
  security:
    ## @param global.security.allowInsecureImages Allows skipping image verification
    allowInsecureImages: false
  ## Compatibility adaptations for Kubernetes platforms
  ##
  compatibility:
    ## Compatibility adaptations for Openshift
    ##
    openshift:
      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
      ##
      adaptSecurityContext: auto
## @section Common parameters
##

## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param commonLabels Labels to add to all deployed objects (sub-charts are not considered)
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param clusterDomain Default Kubernetes cluster domain
##
clusterDomain: freeleaps.cluster
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## Enable diagnostic mode in the deployment
##
diagnosticMode:
  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
  ##
  enabled: false
  ## @param diagnosticMode.command Command to override all containers in the deployment
  ##
  command:
    - sleep
  ## @param diagnosticMode.args Args to override all containers in the deployment
  ##
  args:
    - infinity
## @section Apache Flink parameters
##

## Bitnami Apache Flink image
## ref: https://hub.docker.com/r/bitnami/flink/tags/
## @param image.registry [default: REGISTRY_NAME] Apache Flink image registry
## @param image.repository [default: REPOSITORY_NAME/flink] Apache Flink image repository
## @skip image.tag Apache Flink image tag (immutable tags are recommended)
## @param image.digest Apache Flink image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy image pull policy
## @param image.pullSecrets Apache Flink image pull secrets
## @param image.debug Enable image debug mode
##
image:
  registry: docker.io
  repository: bitnami/flink
  tag: 2.0.0-debian-12-r7
  digest: ""
  ## Specify an imagePullPolicy
  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## e.g:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Enable debug mode
  ##
  debug: false
## @section Jobmanager deployment parameters
##
jobmanager:
  ## @param jobmanager.command Command for running the container (set to default if not set). Use array form
  ##
  command: []
  ## @param jobmanager.args Args for running the container (set to default if not set). Use array form
  ##
  args: []
  ## @param jobmanager.lifecycleHooks [object] Override default flink container hooks
  ##
  lifecycleHooks: {}
  ## @param jobmanager.automountServiceAccountToken Mount Service Account token in pod
  ##
  automountServiceAccountToken: false
  ## @param jobmanager.hostAliases Set pod host aliases
  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
  ##
  hostAliases: []
  ## @param jobmanager.extraEnvVars Extra environment variables to be set on flink container
  ## For example:
  ##  - name: FOO
  ##    value: BAR
  ##
  extraEnvVars: []
  ## @param jobmanager.extraEnvVarsCM Name of existing ConfigMap containing extra env vars
  ##
  extraEnvVarsCM: ""
  ## @param jobmanager.extraEnvVarsSecret Name of existing Secret containing extra env vars
  ##
  extraEnvVarsSecret: ""
  ## @param jobmanager.replicaCount Number of Apache Flink Jobmanager replicas
  ##
  replicaCount: 1
  ## Configure extra options for container's liveness, readiness and startup probes
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
  ## @param jobmanager.livenessProbe.enabled Enable livenessProbe on Jobmanager nodes
  ## @param jobmanager.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
  ## @param jobmanager.livenessProbe.periodSeconds Period seconds for livenessProbe
  ## @param jobmanager.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
  ## @param jobmanager.livenessProbe.failureThreshold Failure threshold for livenessProbe
  ## @param jobmanager.livenessProbe.successThreshold Success threshold for livenessProbe
  ##
  livenessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 20
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
  ## @param jobmanager.startupProbe.enabled Enable startupProbe on Jobmanager containers
  ## @param jobmanager.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
  ## @param jobmanager.startupProbe.periodSeconds Period seconds for startupProbe
  ## @param jobmanager.startupProbe.timeoutSeconds Timeout seconds for startupProbe
  ## @param jobmanager.startupProbe.failureThreshold Failure threshold for startupProbe
  ## @param jobmanager.startupProbe.successThreshold Success threshold for startupProbe
  ##
  startupProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 15
    successThreshold: 1
  ## @param jobmanager.readinessProbe.enabled Enable readinessProbe
  ## @param jobmanager.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
  ## @param jobmanager.readinessProbe.periodSeconds Period seconds for readinessProbe
  ## @param jobmanager.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
  ## @param jobmanager.readinessProbe.failureThreshold Failure threshold for readinessProbe
  ## @param jobmanager.readinessProbe.successThreshold Success threshold for readinessProbe
  ##
  readinessProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 15
    successThreshold: 1
  ## @param jobmanager.customLivenessProbe Custom livenessProbe that overrides the default one
  ##
  customLivenessProbe: {}
  ## @param jobmanager.customStartupProbe [object] Override default startup probe
  ##
  customStartupProbe: {}
  ## @param jobmanager.customReadinessProbe [object] Override default readiness probe
  ##
  customReadinessProbe: {}
  ## Apache Flink pods' resource requests and limits
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  ## Minimum memory for development is 4GB and 2 CPU cores
  ## Minimum memory for production is 8GB and 4 CPU cores
  ## ref: http://docs.datastax.com/en/archived/flink/2.0/flink/architecture/architecturePlanningHardware_c.html
  ##
  ## We usually recommend not to specify default resources and to leave this as a conscious
  ## choice for the user. This also increases chances charts run on environments with little
  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  ## @param jobmanager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if jobmanager.resources is set (jobmanager.resources is recommended for production).
  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
  ##
  # resourcesPreset: "small"
  ## @param jobmanager.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
  ## Example:
  ## resources:
  ##   requests:
  ##     cpu: 2
  ##     memory: 512Mi
  ##   limits:
  ##     cpu: 3
  ##     memory: 1024Mi
  ##
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 200m
      memory: 2Gi
  ## @param jobmanager.extraVolumeMounts Optionally specify extra list of additional volumeMounts for flink container
  ##
  extraVolumeMounts: []
  ## Container ports to expose
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  containerPorts:
    ## @param jobmanager.containerPorts.rpc Port for RPC
    ##
    rpc: 6123
    ## @param jobmanager.containerPorts.http Port for http UI
    ##
    http: 8081
    ## @param jobmanager.containerPorts.blob Port for blob server
    ##
    blob: 6124
  ## Apache Flink jobmanager.service parameters
  ##
  service:
    ## @param jobmanager.service.type Apache Flink service type
    ##
    type: ClusterIP
    ## Ports to expose
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    ports:
      ## @param jobmanager.service.ports.rpc Port for RPC
      ##
      rpc: 6123
      ## @param jobmanager.service.ports.http Port for http UI
      ##
      http: 8081
      ## @param jobmanager.service.ports.blob Port for blob server
      ## Due to Apache Flink specifics, this port should match the jobmanager.containerPorts.blob port. The taskmanager should be
      ## able to communicate with the jobmanager through the port the jobmanager indicates to the taskmanager, as the jobmanager is not aware of the service port.
      blob: 6124
    ## Node ports to expose
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    nodePorts:
      ## @param jobmanager.service.nodePorts.rpc Node port for RPC
      ##
      rpc: ""
      ## @param jobmanager.service.nodePorts.http Node port for http UI
      ##
      http: ""
      ## @param jobmanager.service.nodePorts.blob Port for blob server
      ##
      blob: ""
    ## @param jobmanager.service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value)
    ##
    extraPorts: []
    ## @param jobmanager.service.loadBalancerIP LoadBalancerIP if service type is `LoadBalancer`
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
    ##
    loadBalancerIP: ""
    ## @param jobmanager.service.loadBalancerSourceRanges Service Load Balancer sources
    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
    ## e.g:
    ## loadBalancerSourceRanges:
    ##   - 10.10.10.0/24
    ##
    loadBalancerSourceRanges: []
    ## @param jobmanager.service.clusterIP Service Cluster IP
    ## e.g.:
    ## clusterIP: None
    ##
    clusterIP: ""
    ## @param jobmanager.service.externalTrafficPolicy Service external traffic policy
    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
    ##
    externalTrafficPolicy: Cluster
    ## @param jobmanager.service.annotations Provide any additional annotations which may be required.
    ## This can be used to set the LoadBalancer service type to internal only.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
    ##
    annotations: {}
    ## @param jobmanager.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
    ## If "ClientIP", consecutive client requests will be directed to the same Pod
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
    ##
    sessionAffinity: None
    ## @param jobmanager.service.sessionAffinityConfig Additional settings for the sessionAffinity
    ## sessionAffinityConfig:
    ##   clientIP:
    ##     timeoutSeconds: 300
    ##
    sessionAffinityConfig: {}
  ## Network Policies
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
  ##
  networkPolicy:
    ## @param jobmanager.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
    ##
    enabled: true
    ## @param jobmanager.networkPolicy.allowExternal Don't require server label for connections
    ## The Policy model to apply. When set to false, only pods with the correct
    ## server label will have network access to the ports server is listening
    ## on. When true, server will accept connections from any source
    ## (with the correct destination port).
    ##
    allowExternal: true
    ## @param jobmanager.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
    ##
    allowExternalEgress: true
    ## @param jobmanager.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
    ## e.g:
    ## extraIngress:
    ##   - ports:
    ##       - port: 1234
    ##     from:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    extraIngress: []
    ## @param jobmanager.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
    ## e.g:
    ## extraEgress:
    ##   - ports:
    ##       - port: 1234
    ##     to:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    ##
    extraEgress: []
    ## @param jobmanager.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
    ## @param jobmanager.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
    ##
    ingressNSMatchLabels: {}
    ingressNSPodMatchLabels: {}
  ## Apache Flink Jobmanager serviceAccount parameters
  ##
  serviceAccount:
    ## @param jobmanager.serviceAccount.create Enables ServiceAccount
    ##
    create: true
    ## @param jobmanager.serviceAccount.name ServiceAccount name
    ##
    name: ""
    ## @param jobmanager.serviceAccount.annotations Annotations to add to all deployed objects
    ##
    annotations: {}
    ## @param jobmanager.serviceAccount.automountServiceAccountToken Automount API credentials for a service account.
    ##
    automountServiceAccountToken: false
  ## Pod security context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
  ## @param jobmanager.podSecurityContext.enabled Enabled Apache Flink pods' Security Context
  ## @param jobmanager.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
  ## @param jobmanager.podSecurityContext.sysctls Set kernel settings using the sysctl interface
  ## @param jobmanager.podSecurityContext.supplementalGroups Set filesystem extra groups
  ## @param jobmanager.podSecurityContext.fsGroup Set Apache Flink pod's Security Context fsGroup
  ##
  podSecurityContext:
    enabled: true
    fsGroupChangePolicy: Always
    sysctls: []
    supplementalGroups: []
    fsGroup: 1001
  ## Configure Container Security Context (only main container)
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param jobmanager.containerSecurityContext.enabled Enabled Apache Flink containers' Security Context
  ## @param jobmanager.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
  ## @param jobmanager.containerSecurityContext.runAsUser Set Apache Flink container's Security Context runAsUser
  ## @param jobmanager.containerSecurityContext.runAsGroup Set Apache Flink container's Security Context runAsGroup
  ## @param jobmanager.containerSecurityContext.runAsNonRoot Force the container to be run as non root
  ## @param jobmanager.containerSecurityContext.allowPrivilegeEscalation Allows privilege escalation
  ## @param jobmanager.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
  ## @param jobmanager.containerSecurityContext.privileged Set primary container's Security Context privileged
  ## @param jobmanager.containerSecurityContext.capabilities.drop List of capabilities to be dropped
  ## @param jobmanager.containerSecurityContext.seccompProfile.type Rules specifying actions to take based on the requested syscall
  ##
  containerSecurityContext:
    enabled: true
    seLinuxOptions: {}
    runAsUser: 1001
    runAsGroup: 1001
    runAsNonRoot: true
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    privileged: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: "RuntimeDefault"
  ## @param jobmanager.podAnnotations Additional pod annotations
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
  ##
  podAnnotations: {}
  ## @param jobmanager.podLabels Additional pod labels
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
  ##
  podLabels: {}
  ## @param jobmanager.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAffinityPreset: ""
  ## @param jobmanager.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAntiAffinityPreset: soft
  ## Node affinity preset
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
  ##
  nodeAffinityPreset:
    ## @param jobmanager.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
    ##
    type: ""
    ## @param jobmanager.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
    ##
    key: ""
    ## @param jobmanager.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
    ## E.g.
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param jobmanager.priorityClassName Server priorityClassName
  ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  ##
  priorityClassName: ""
  ## @param jobmanager.affinity Affinity for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
  ##
  affinity: {}
  ## @param jobmanager.nodeSelector Node labels for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
  ##
  nodeSelector: {}
  ## @param jobmanager.tolerations Tolerations for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param jobmanager.topologySpreadConstraints Topology Spread Constraints for pod assignment
  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
||||
## The value is evaluated as a template
|
||||
##
|
||||
topologySpreadConstraints: []
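  ## A commented, illustrative constraint that spreads jobmanager pods across zones; the
  ## component label is an assumption, adjust it to the labels this chart actually renders:
  ## topologySpreadConstraints:
  ##   - maxSkew: 1
  ##     topologyKey: topology.kubernetes.io/zone
  ##     whenUnsatisfiable: ScheduleAnyway
  ##     labelSelector:
  ##       matchLabels:
  ##         app.kubernetes.io/component: jobmanager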
  ## @param jobmanager.schedulerName Alternative scheduler
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param jobmanager.updateStrategy.type Apache Flink jobmanager deployment strategy type
  ## @param jobmanager.updateStrategy.rollingUpdate [object,nullable] Apache Flink jobmanager deployment rolling update configuration parameters
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  ##
  updateStrategy:
    type: RollingUpdate
    rollingUpdate: null
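  ## An illustrative explicit rolling-update budget (values are placeholders, not chart defaults):
  ## updateStrategy:
  ##   type: RollingUpdate
  ##   rollingUpdate:
  ##     maxUnavailable: 0
  ##     maxSurge: 1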
  ## @param jobmanager.extraVolumes Optionally specify extra list of additional volumes for flink container
  ##
  extraVolumes: []
  ## @param jobmanager.initContainers Add additional init containers to the flink pods
  ##
  initContainers: []
  ## @param jobmanager.sidecars Add additional sidecar containers to the flink pods
  ##
  sidecars: []

  ## @param jobmanager.pdb.create Enable/disable a Pod Disruption Budget creation
  ## @param jobmanager.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
  ## @param jobmanager.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `jobmanager.pdb.minAvailable` and `jobmanager.pdb.maxUnavailable` are empty.
  ##
  pdb:
    create: true
    minAvailable: ""
    maxUnavailable: ""
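  ## e.g. keep at least one jobmanager scheduled during voluntary disruptions (illustrative):
  ## pdb:
  ##   create: true
  ##   minAvailable: 1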

## @section TaskManager deployment parameters
##
taskmanager:
  ## @param taskmanager.command Command for running the container (set to default if not set). Use array form
  ##
  command: []
  ## @param taskmanager.args Args for running the container (set to default if not set). Use array form
  ##
  args: []
  ## @param taskmanager.lifecycleHooks [object] Override default container lifecycle hooks
  ##
  lifecycleHooks: {}
  ## @param taskmanager.automountServiceAccountToken Mount Service Account token in pod
  ##
  automountServiceAccountToken: false
  ## @param taskmanager.hostAliases Set pod host aliases
  ## ref: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
  ##
  hostAliases: []
  ## @param taskmanager.extraEnvVars Extra environment variables to be set on flink container
  ## For example:
  ##   - name: FOO
  ##     value: BAR
  ##
  extraEnvVars: []
  ## @param taskmanager.extraEnvVarsCM Name of existing ConfigMap containing extra env vars
  ##
  extraEnvVarsCM: ""
  ## @param taskmanager.extraEnvVarsSecret Name of existing Secret containing extra env vars
  ##
  extraEnvVarsSecret: ""
  ## @param taskmanager.replicaCount Number of Apache Flink replicas
  ##
  replicaCount: 1
  ## Configure extra options for container's liveness, readiness and startup probes
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
  ## @param taskmanager.livenessProbe.enabled Enable livenessProbe on taskmanager nodes
  ## @param taskmanager.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
  ## @param taskmanager.livenessProbe.periodSeconds Period seconds for livenessProbe
  ## @param taskmanager.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
  ## @param taskmanager.livenessProbe.failureThreshold Failure threshold for livenessProbe
  ## @param taskmanager.livenessProbe.successThreshold Success threshold for livenessProbe
  ##
  livenessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 20
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
  ## @param taskmanager.startupProbe.enabled Enable startupProbe on taskmanager containers
  ## @param taskmanager.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
  ## @param taskmanager.startupProbe.periodSeconds Period seconds for startupProbe
  ## @param taskmanager.startupProbe.timeoutSeconds Timeout seconds for startupProbe
  ## @param taskmanager.startupProbe.failureThreshold Failure threshold for startupProbe
  ## @param taskmanager.startupProbe.successThreshold Success threshold for startupProbe
  ##
  startupProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 15
    successThreshold: 1
  ## @param taskmanager.readinessProbe.enabled Enable readinessProbe
  ## @param taskmanager.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
  ## @param taskmanager.readinessProbe.periodSeconds Period seconds for readinessProbe
  ## @param taskmanager.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
  ## @param taskmanager.readinessProbe.failureThreshold Failure threshold for readinessProbe
  ## @param taskmanager.readinessProbe.successThreshold Success threshold for readinessProbe
  ##
  readinessProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 15
    successThreshold: 1
  ## @param taskmanager.customLivenessProbe Custom livenessProbe that overrides the default one
  ##
  customLivenessProbe: {}
  ## @param taskmanager.customStartupProbe [object] Override default startup probe
  ##
  customStartupProbe: {}
  ## @param taskmanager.customReadinessProbe [object] Override default readiness probe
  ##
  customReadinessProbe: {}
  ## Apache Flink pods' resource requests and limits
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  ## Minimum for development: 4GB memory and 2 CPU cores
  ## Minimum for production: 8GB memory and 4 CPU cores
  ## ref: http://docs.datastax.com/en/archived/flink/2.0/flink/architecture/architecturePlanningHardware_c.html
  ##
  ## We usually recommend not to specify default resources and to leave this as a conscious
  ## choice for the user. This also increases chances charts run on environments with little
  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  ## @param taskmanager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if taskmanager.resources is set (taskmanager.resources is recommended for production).
  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
  ##
  # resourcesPreset: "small"
  ## @param taskmanager.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
  ## Example:
  ## resources:
  ##   requests:
  ##     cpu: 2
  ##     memory: 512Mi
  ##   limits:
  ##     cpu: 3
  ##     memory: 1024Mi
  ##
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 200m
      memory: 2Gi
  ## @param taskmanager.extraVolumeMounts Optionally specify extra list of additional volumeMounts for flink container
  ##
  extraVolumeMounts: []
  ## Container ports to expose
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ## @param taskmanager.containerPorts.data Port for data exchange
  ## @param taskmanager.containerPorts.rpc Port for RPC
  ## @param taskmanager.containerPorts.internalMetrics Port for internal metrics query service
  ##
  containerPorts:
    data: 6121
    rpc: 6122
    internalMetrics: 6126
  ## Apache Flink taskmanager.service parameters
  ##
  service:
    ## @param taskmanager.service.type Apache Flink service type
    ##
    type: ClusterIP
    ## Ports to expose
    ## @param taskmanager.service.ports.data Port for data exchange
    ## @param taskmanager.service.ports.rpc Port for RPC
    ## @param taskmanager.service.ports.internalMetrics Port for internal metrics query service
    ##
    ports:
      data: 6121
      rpc: 6122
      internalMetrics: 6126
    ## Node ports to expose
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    ## @param taskmanager.service.nodePorts.data Node port for data exchange
    ## @param taskmanager.service.nodePorts.rpc Node port for RPC
    ## @param taskmanager.service.nodePorts.internalMetrics Node port for internal metrics query service
    ##
    nodePorts:
      data: ""
      rpc: ""
      internalMetrics: ""
    ## @param taskmanager.service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value)
    ##
    extraPorts: []
    ## @param taskmanager.service.loadBalancerIP LoadBalancerIP if service type is `LoadBalancer`
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
    ##
    loadBalancerIP: ""
    ## @param taskmanager.service.loadBalancerSourceRanges Service Load Balancer sources
    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
    ## e.g:
    ## loadBalancerSourceRanges:
    ##   - 10.10.10.0/24
    ##
    loadBalancerSourceRanges: []
    ## @param taskmanager.service.clusterIP Service Cluster IP
    ## e.g:
    ## clusterIP: None
    ##
    clusterIP: ""
    ## @param taskmanager.service.externalTrafficPolicy Service external traffic policy
    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
    ##
    externalTrafficPolicy: Cluster
    ## @param taskmanager.service.annotations Provide any additional annotations which may be required.
    ## This can be used to set the LoadBalancer service type to internal only.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
    ##
    annotations: {}
    ## @param taskmanager.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
    ## If "ClientIP", consecutive client requests will be directed to the same Pod
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
    ##
    sessionAffinity: None
    ## @param taskmanager.service.sessionAffinityConfig Additional settings for the sessionAffinity
    ## e.g:
    ## sessionAffinityConfig:
    ##   clientIP:
    ##     timeoutSeconds: 300
    ##
    sessionAffinityConfig: {}
  ## Network Policies
  ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
  ##
  networkPolicy:
    ## @param taskmanager.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
    ##
    enabled: true
    ## @param taskmanager.networkPolicy.allowExternal Don't require server label for connections
    ## The Policy model to apply. When set to false, only pods with the correct
    ## server label will have network access to the ports the server is listening
    ## on. When true, the server will accept connections from any source
    ## (with the correct destination port).
    ##
    allowExternal: true
    ## @param taskmanager.networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
    ##
    allowExternalEgress: true
    ## @param taskmanager.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
    ## e.g:
    ## extraIngress:
    ##   - ports:
    ##       - port: 1234
    ##     from:
    ##       - podSelector:
    ##           matchLabels:
    ##             role: frontend
    ##       - podSelector:
    ##           matchExpressions:
    ##             - key: role
    ##               operator: In
    ##               values:
    ##                 - frontend
    ##
    extraIngress: []
    ## @param taskmanager.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
    ## e.g:
    ## extraEgress:
    ##   - ports:
    ##       - port: 1234
    ##     to:
    ##       - podSelector:
    ##           matchLabels:
    ##             role: frontend
    ##       - podSelector:
    ##           matchExpressions:
    ##             - key: role
    ##               operator: In
    ##               values:
    ##                 - frontend
    ##
    extraEgress: []
    ## @param taskmanager.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
    ## @param taskmanager.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
    ##
    ingressNSMatchLabels: {}
    ingressNSPodMatchLabels: {}
  ## Apache Flink taskmanager serviceAccount parameters
  ##
  serviceAccount:
    ## @param taskmanager.serviceAccount.create Enables ServiceAccount
    ##
    create: true
    ## @param taskmanager.serviceAccount.name ServiceAccount name
    ##
    name: ""
    ## @param taskmanager.serviceAccount.annotations Annotations to add to all deployed objects
    ##
    annotations: {}
    ## @param taskmanager.serviceAccount.automountServiceAccountToken Automount API credentials for a service account
    ##
    automountServiceAccountToken: false
  ## Pod security context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
  ## @param taskmanager.podSecurityContext.enabled Enable Apache Flink pods' Security Context
  ## @param taskmanager.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
  ## @param taskmanager.podSecurityContext.sysctls Set kernel settings using the sysctl interface
  ## @param taskmanager.podSecurityContext.supplementalGroups Set filesystem extra groups
  ## @param taskmanager.podSecurityContext.fsGroup Set Apache Flink pod's Security Context fsGroup
  ##
  podSecurityContext:
    enabled: true
    fsGroupChangePolicy: Always
    sysctls: []
    supplementalGroups: []
    fsGroup: 1001
  ## Configure Container Security Context (only main container)
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param taskmanager.containerSecurityContext.enabled Enable Apache Flink containers' Security Context
  ## @param taskmanager.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
  ## @param taskmanager.containerSecurityContext.runAsUser Set Apache Flink container's Security Context runAsUser
  ## @param taskmanager.containerSecurityContext.runAsGroup Set Apache Flink container's Security Context runAsGroup
  ## @param taskmanager.containerSecurityContext.runAsNonRoot Force the container to be run as non-root
  ## @param taskmanager.containerSecurityContext.privileged Set primary container's Security Context privileged
  ## @param taskmanager.containerSecurityContext.allowPrivilegeEscalation Allow privilege escalation
  ## @param taskmanager.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
  ## @param taskmanager.containerSecurityContext.capabilities.drop List of capabilities to be dropped
  ## @param taskmanager.containerSecurityContext.seccompProfile.type Rules specifying actions to take based on the requested syscall
  ##
  containerSecurityContext:
    enabled: true
    seLinuxOptions: {}
    runAsUser: 1001
    runAsGroup: 1001
    runAsNonRoot: true
    privileged: false
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: "RuntimeDefault"
  ## @param taskmanager.podAnnotations Additional pod annotations
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
  ##
  podAnnotations: {}
  ## @param taskmanager.podLabels Additional pod labels
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
  ##
  podLabels: {}
  ## @param taskmanager.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAffinityPreset: ""
  ## @param taskmanager.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAntiAffinityPreset: soft
  ## Node affinity preset
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
  ##
  nodeAffinityPreset:
    ## @param taskmanager.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
    ##
    type: ""
    ## @param taskmanager.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
    ##
    key: ""
    ## @param taskmanager.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
    ## e.g:
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param taskmanager.priorityClassName Server priorityClassName
  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  ##
  priorityClassName: ""
  ## @param taskmanager.affinity Affinity for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
  ##
  affinity: {}
  ## @param taskmanager.nodeSelector Node labels for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
  ##
  nodeSelector: {}
  ## @param taskmanager.tolerations Tolerations for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param taskmanager.topologySpreadConstraints Topology Spread Constraints for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ## The value is evaluated as a template
  ##
  topologySpreadConstraints: []
  ## @param taskmanager.schedulerName Alternative scheduler
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param taskmanager.podManagementPolicy Pod management policy for the Apache Flink taskmanager statefulset
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
  ##
  podManagementPolicy: Parallel
  ## @param taskmanager.updateStrategy.type Apache Flink taskmanager statefulset strategy type
  ## @param taskmanager.updateStrategy.rollingUpdate [object,nullable] Apache Flink taskmanager statefulset rolling update configuration parameters
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
  ##
  updateStrategy:
    type: RollingUpdate
    rollingUpdate: null
  ## @param taskmanager.extraVolumes Optionally specify extra list of additional volumes for flink container
  ##
  extraVolumes: []
  ## @param taskmanager.initContainers Add additional init containers to the flink pods
  ##
  initContainers: []
  ## @param taskmanager.sidecars Add additional sidecar containers to the flink pods
  ##
  sidecars: []
  ## @param taskmanager.pdb.create Enable/disable a Pod Disruption Budget creation
  ## @param taskmanager.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
  ## @param taskmanager.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `taskmanager.pdb.minAvailable` and `taskmanager.pdb.maxUnavailable` are empty.
  ##
  pdb:
    create: true
    minAvailable: ""
    maxUnavailable: ""

cluster/manifests/freeleaps-data-platform/kafbat/config.yaml (new file, 21 lines)
@ -0,0 +1,21 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: kafbat-config
  namespace: freeleaps-data-platform
data:
  config.yml: |
    kafka:
      clusters:
        - name: freeleaps-kafka-cluster
          bootstrap-servers: freeleaps-kafka-cluster-kafka-bootstrap.freeleaps-data-platform.svc.freeleaps.cluster:9092
          properties:
            security.protocol: SASL_PLAINTEXT
            sasl.mechanism: SCRAM-SHA-512
            sasl.jaas.config: 'org.apache.kafka.common.security.scram.ScramLoginModule required username="kafbat-agent" password="lUL2Ay9twljpMMNC2mdwMacK9PoPkMGv";'
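            # These SCRAM credentials must match the 'kafbat-agent' KafkaUser defined alongside this ConfigMap.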
          schemaRegistry: http://schema-registry.freeleaps-data-platform.svc.freeleaps.cluster:8081
    auth: disabled
    management:
      health:
        ldap:
          enabled: false

@ -0,0 +1,48 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
  name: kafbat-agent
  namespace: freeleaps-data-platform
  labels:
    strimzi.io/cluster: freeleaps-kafka-cluster
spec:
  authentication:
    type: scram-sha-512
  authorization:
    type: simple
    acls:
      - resource:
          type: topic
          name: "*"
          patternType: literal
        host: "*"
        operations:
          - Read
      - resource:
          type: topic
          name: "*"
          patternType: literal
        host: "*"
        operations:
          - DescribeConfigs
      - resource:
          type: group
          name: "*"
          patternType: literal
        host: "*"
        operations:
          - Describe
      - resource:
          type: cluster
          name: "*"
          patternType: literal
        host: "*"
        operations:
          - Describe
      - resource:
          type: cluster
          name: "*"
          patternType: literal
        host: "*"
        operations:
          - DescribeConfigs
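# Read/describe-only ACLs: the kafbat UI agent can browse topics, groups, and cluster configs but cannot produce or alter them.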

@ -0,0 +1,48 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnect
metadata:
  name: freeleaps-prod-mongo-cds
  namespace: freeleaps-data-platform
  annotations:
    strimzi.io/use-connector-resources: "true"
spec:
  version: 4.0.0
  bootstrapServers: freeleaps-kafka-cluster-kafka-bootstrap.freeleaps-data-platform.svc.freeleaps.cluster:9093
  config:
    config.storage.topic: freeleaps-prod-mongo-cds-configs
    config.storage.replication.factor: -1
    group.id: freeleaps-prod-mongo-cds
    offset.storage.topic: freeleaps-prod-mongo-cds-offsets
    offset.storage.replication.factor: -1
    status.storage.topic: freeleaps-prod-mongo-cds-status
    status.storage.replication.factor: -1
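    # A replication factor of -1 defers to the broker's default.replication.factor for these internal topics.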
  image: freeleaps/kafka-connectors:mongo-connector
  replicas: 1
  authentication:
    type: scram-sha-512
    username: freeleaps-user
    passwordSecret:
      secretName: freeleaps-user
      password: password
  tls:
    trustedCertificates:
      - secretName: freeleaps-kafka-cluster-cluster-ca-cert
        pattern: "*.crt"
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnector
metadata:
  name: freeleaps-prod-mongo-cds
  namespace: freeleaps-data-platform
  labels:
    strimzi.io/cluster: freeleaps-prod-mongo-cds
spec:
  class: com.mongodb.kafka.connect.MongoSourceConnector
  config:
    connection.uri: mongodb+srv://freeadmin:0eMV0bt8oyaknA0m@freeleaps2.zmsmpos.mongodb.net/?retryWrites=true&w=majority
    database: freeleaps2
    pipeline: '[{"$match": {"operationType": {"$in": ["insert", "update", "delete", "replace"]}}}]'
    startup.mode: copy_existing
    startup.mode.copy.existing.pipeline: "[]"
    topic.prefix: raw-cds-events
  tasksMax: 1
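  # MongoDB change-stream source connectors are effectively single-task, so tasksMax: 1 is the practical ceiling here.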

@ -0,0 +1,108 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
  name: freeleaps-kafka-cluster-controllers
  namespace: freeleaps-data-platform
  labels:
    strimzi.io/cluster: freeleaps-kafka-cluster
spec:
  replicas: 3
  roles:
    - controller
  storage:
    type: jbod
    volumes:
      - id: 0
        type: persistent-claim
        size: 25Gi
        deleteClaim: false
        kraftMetadata: shared
        class: azure-disk-std-ssd-lrs
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
  name: freeleaps-kafka-cluster-brokers
  namespace: freeleaps-data-platform
  labels:
    strimzi.io/cluster: freeleaps-kafka-cluster
spec:
  replicas: 3
  roles:
    - broker
  storage:
    type: jbod
    volumes:
      - id: 0
        type: persistent-claim
        size: 25Gi
        deleteClaim: false
        kraftMetadata: shared
        class: azure-disk-std-ssd-lrs
---
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: freeleaps-kafka-cluster
  namespace: freeleaps-data-platform
  annotations:
    strimzi.io/node-pools: enabled
    strimzi.io/kraft: enabled
spec:
  kafka:
    version: 4.0.0
    metadataVersion: 4.0-IV3
    listeners:
      - name: plain
        port: 9092
        type: internal
        tls: false
        authentication:
          type: scram-sha-512
      - name: tls
        port: 9093
        type: internal
        tls: true
        authentication:
          type: scram-sha-512
    config:
      offsets.topic.replication.factor: 1
      transaction.state.log.replication.factor: 1
      transaction.state.log.min.isr: 1
      default.replication.factor: 1
      min.insync.replicas: 1
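      # NOTE: with three brokers available, replication factor 1 and min.insync.replicas 1 leave
      # no redundancy; raising these (e.g. to 3 and 2) would be the more durable choice.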
    authorization:
      type: simple
      superUsers:
        - freeleaps-user
  entityOperator:
    topicOperator: {}
    userOperator: {}
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
  name: freeleaps-user
  namespace: freeleaps-data-platform
  labels:
    strimzi.io/cluster: freeleaps-kafka-cluster
spec:
  authentication:
    type: scram-sha-512
  authorization:
    type: simple
    acls:
      - resource:
          type: topic
          name: "*"
          patternType: literal
        host: "*"
        operations:
          - All
      - resource:
          type: group
          name: "*"
          patternType: literal
        operations:
          - All
        host: "*"
@ -1,90 +0,0 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: kafka-cluster
  namespace: freeleaps-data-platform
  labels:
    app: kafka
    component: messaging
spec:
  kafka:
    version: 3.8.0
    replicas: 3
    listeners:
      - name: plain
        port: 9092
        type: internal
        tls: false
        authentication:
          type: scram-sha-512
      - name: tls
        port: 9093
        type: internal
        tls: true
        authentication:
          type: tls
    config:
      offsets.topic.replication.factor: 3
      transaction.state.log.replication.factor: 3
      transaction.state.log.min.isr: 2
      default.replication.factor: 3
      min.insync.replicas: 2
      inter.broker.protocol.version: "3.8"
      log.retention.hours: 168 # 7 days
      log.segment.bytes: 1073741824 # 1GB
      num.partitions: 8
      log.retention.check.interval.ms: 300000
    storage:
      type: persistent-claim
      size: 20Gi
      deleteClaim: false
  zookeeper:
    replicas: 3
    storage:
      type: persistent-claim
      size: 10Gi
      deleteClaim: false
  entityOperator:
    topicOperator: {}
    userOperator: {}
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  name: system-events
  namespace: freeleaps-data-platform
  labels:
    strimzi.io/cluster: kafka-cluster
spec:
  partitions: 3
  replicas: 3
  config:
    retention.ms: 604800000 # 7 days
    segment.bytes: 1073741824 # 1GB
    cleanup.policy: delete
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
  name: freeleaps-user
  namespace: freeleaps-data-platform
  labels:
    strimzi.io/cluster: kafka-cluster
spec:
  authentication:
    type: scram-sha-512
  authorization:
    type: simple
    acls:
      - resource:
          type: topic
          name: "*"
          patternType: literal
        operation: All
        host: "*"
      - resource:
          type: group
          name: "*"
          patternType: literal
        operation: All
        host: "*"
(File diff suppressed because it is too large)
@ -1,126 +0,0 @@
# Metabase Deployment

## Overview
This directory contains Kubernetes manifests for deploying Metabase, a business intelligence and analytics platform, along with its PostgreSQL database.

## Components

### 1. metabase-postgres.yaml
PostgreSQL database deployment:
- Persistent storage for data
- ConfigMap for database configuration
- Secret for database password
- Service for internal communication

### 2. metabase-deployment.yaml
Metabase application deployment:
- Metabase container with latest image
- Persistent storage for application data
- Environment variables for database connection
- Health checks and resource limits
- Service for internal communication

### 3. metabase-ingress.yaml
Ingress configuration for external access:
- Nginx ingress class
- Multiple host support (metabase.freeleaps.cluster, metabase.local)
- SSL redirect disabled for development

## Deployment Steps

### 1. Deploy PostgreSQL Database
```bash
kubectl apply -f metabase-postgres.yaml
```

### 2. Wait for PostgreSQL to be Ready
```bash
kubectl wait --for=condition=ready pod -l app=metabase-postgres -n metabase --timeout=300s
```

### 3. Deploy Metabase Application
```bash
kubectl apply -f metabase-deployment.yaml
```

### 4. Deploy Ingress (Optional)
```bash
kubectl apply -f metabase-ingress.yaml
```

### 5. Monitor Deployment
```bash
kubectl get pods -n metabase
kubectl get services -n metabase
kubectl get ingress -n metabase
```

## Configuration

### Database Connection
- **Type**: PostgreSQL
- **Host**: metabase-postgres
- **Port**: 5432
- **Database**: metabase
- **User**: metabase
- **Password**: metabasepassword

### Storage
- **Metabase Data**: 10Gi persistent storage
- **PostgreSQL Data**: 5Gi persistent storage
- **Storage Class**: azure-disk-std-ssd-lrs

### Resources
- **Metabase**: 512Mi-1Gi memory, 250m-500m CPU
- **PostgreSQL**: 256Mi-512Mi memory, 250m-500m CPU

## Access

### Internal Access
- **Metabase**: http://metabase.metabase.svc.cluster.local:3000
- **PostgreSQL**: metabase-postgres.metabase.svc.cluster.local:5432

### External Access (with Ingress)
- **Primary**: http://metabase.freeleaps.cluster
- **Alternative**: http://metabase.local

## Initial Setup

1. **First Access**: Navigate to the Metabase URL
2. **Setup Wizard**: Follow the initial setup wizard
3. **Database Connection**: Use the internal PostgreSQL connection
4. **Admin User**: Create the first admin user
5. **Data Sources**: Connect to your data sources

## Troubleshooting

### Check Pod Status
```bash
kubectl get pods -n metabase
kubectl describe pod <pod-name> -n metabase
```

### Check Logs
```bash
kubectl logs <pod-name> -n metabase                # Metabase logs
kubectl logs <postgres-pod-name> -n metabase       # PostgreSQL logs (separate pod)
```

### Check Services
```bash
kubectl get services -n metabase
kubectl describe service metabase -n metabase
```

### Check Storage
```bash
kubectl get pvc -n metabase
kubectl describe pvc metabase-data -n metabase
```

## Notes

- **Initial Startup**: Metabase may take 2-3 minutes to start up on first deployment
- **Database**: Ensure PostgreSQL is fully ready before deploying Metabase
- **Storage**: Both Metabase and PostgreSQL use persistent storage for data persistence
- **Security**: The default password is only base64 encoded (not encrypted); change it before any production use
@ -0,0 +1,13 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: metabase-mathmast-dot-com
  namespace: freeleaps-data-platform
spec:
  commonName: metabase.mathmast.com
  dnsNames:
    - metabase.mathmast.com
  issuerRef:
    name: mathmast-dot-com
    kind: ClusterIssuer
  secretName: metabase-mathmast-dot-com-tls
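  # cert-manager stores the issued certificate in this secret; the Metabase chart's
  # ingress TLS section (values.yaml below) references the same secret name.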

@ -1,145 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: metabase
  labels:
    name: metabase
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metabase
  namespace: metabase
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: metabase-data
  namespace: metabase
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: azure-disk-std-ssd-lrs
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: metabase-config
  namespace: metabase
data:
  MB_DB_TYPE: "postgres"
  MB_DB_DBNAME: "metabase"
  MB_DB_PORT: "5432"
  MB_DB_USER: "metabase"
  MB_DB_HOST: "metabase-postgres"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metabase
  namespace: metabase
  labels:
    app: metabase
spec:
  replicas: 1
  selector:
    matchLabels:
      app: metabase
  template:
    metadata:
      labels:
        app: metabase
    spec:
      serviceAccountName: metabase
      containers:
        - name: metabase
          image: metabase/metabase:latest
          ports:
            - containerPort: 3000
              name: http
          env:
            - name: MB_DB_TYPE
              valueFrom:
                configMapKeyRef:
                  name: metabase-config
                  key: MB_DB_TYPE
            - name: MB_DB_DBNAME
              valueFrom:
                configMapKeyRef:
                  name: metabase-config
                  key: MB_DB_DBNAME
            - name: MB_DB_PORT
              valueFrom:
                configMapKeyRef:
                  name: metabase-config
                  key: MB_DB_PORT
            - name: MB_DB_USER
              valueFrom:
                configMapKeyRef:
                  name: metabase-config
                  key: MB_DB_USER
            - name: MB_DB_HOST
              valueFrom:
                configMapKeyRef:
                  name: metabase-config
                  key: MB_DB_HOST
            - name: MB_DB_PASS
              valueFrom:
                secretKeyRef:
                  name: metabase-db-secret
                  key: password
          volumeMounts:
            - name: metabase-data
              mountPath: /metabase-data
          resources:
            requests:
              memory: "512Mi"
              cpu: "250m"
            limits:
              memory: "1Gi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 120
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
      volumes:
        - name: metabase-data
          persistentVolumeClaim:
            claimName: metabase-data
---
apiVersion: v1
kind: Service
metadata:
  name: metabase
  namespace: metabase
  labels:
    app: metabase
spec:
  type: ClusterIP
  ports:
    - port: 3000
      targetPort: 3000
      protocol: TCP
      name: http
  selector:
    app: metabase
---
apiVersion: v1
kind: Secret
metadata:
  name: metabase-db-secret
  namespace: metabase
type: Opaque
data:
  password: bWV0YWJhc2VwYXNzd29yZA== # metabasepassword in base64
@ -1,32 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: metabase-ingress
  namespace: metabase
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
  rules:
    - host: metabase.freeleaps.cluster
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: metabase
                port:
                  number: 3000
    - host: metabase.local
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: metabase
                port:
                  number: 3000
@ -1,119 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: metabase-postgres-data
  namespace: metabase
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: azure-disk-std-ssd-lrs
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: metabase-postgres-config
  namespace: metabase
data:
  POSTGRES_DB: "metabase"
  POSTGRES_USER: "metabase"
---
apiVersion: v1
kind: Secret
metadata:
  name: metabase-postgres-secret
  namespace: metabase
type: Opaque
data:
  POSTGRES_PASSWORD: bWV0YWJhc2VwYXNzd29yZA== # metabasepassword in base64
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metabase-postgres
  namespace: metabase
  labels:
    app: metabase-postgres
spec:
  replicas: 1
  selector:
    matchLabels:
      app: metabase-postgres
  template:
    metadata:
      labels:
        app: metabase-postgres
    spec:
      containers:
        - name: postgres
          image: postgres:15
          ports:
            - containerPort: 5432
              name: postgres
          env:
            - name: POSTGRES_DB
              valueFrom:
                configMapKeyRef:
                  name: metabase-postgres-config
                  key: POSTGRES_DB
            - name: POSTGRES_USER
              valueFrom:
                configMapKeyRef:
                  name: metabase-postgres-config
                  key: POSTGRES_USER
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: metabase-postgres-secret
                  key: POSTGRES_PASSWORD
            - name: PGDATA
              value: "/var/lib/postgresql/data/pgdata"
          volumeMounts:
            - name: postgres-data
              mountPath: /var/lib/postgresql/data
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            exec:
              command:
                - pg_isready
                - -U
                - metabase
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            exec:
              command:
                - pg_isready
                - -U
                - metabase
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: postgres-data
          persistentVolumeClaim:
            claimName: metabase-postgres-data
---
apiVersion: v1
kind: Service
metadata:
  name: metabase-postgres
  namespace: metabase
  labels:
    app: metabase-postgres
spec:
  type: ClusterIP
  ports:
    - port: 5432
      targetPort: 5432
      protocol: TCP
      name: postgres
  selector:
    app: metabase-postgres
cluster/manifests/freeleaps-data-platform/metabase/values.yaml (new file, 361 lines)
@ -0,0 +1,361 @@
replicaCount: 1

hpa:
  enabled: false
  minReplicas: 1
  maxReplicas: 6
  targetCPUUtilizationPercentage: 80
  targetMemoryUtilizationPercentage: 80

# Adding host aliases to the metabase deployment
hostAliases: []
# - ip: "127.0.0.1"
#   hostnames:
#     - "foo.local"
#     - "bar.local"

pdb:
  create: false
  minAvailable: 1
  maxUnavailable: ""

deploymentAnnotations: {}
deploymentLabels: {}
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
image:
  repository: metabase/metabase
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
  command: []
  pullPolicy: IfNotPresent
  pullSecrets: []

## String to fully override metabase.fullname template
##
# fullnameOverride:

# Config Jetty web server
listen:
  host: "0.0.0.0"
  port: 3000

monitoring:
  enabled: false
  serviceMonitor:
    enabled: false
  port: 9191

ssl:
  # If you have an ssl certificate and would prefer to have Metabase run over HTTPS
  enabled: false
  # port: 8443
  # keyStore: |-
  #   << JKS KEY STORE >>
  # keyStorePassword: storepass
jetty:
  # maxThreads: 254
  # minThreads: 8
  # maxQueued: -1
  # maxIdleTime: 60000

# Backend database
database:
  # Database type (h2 / mysql / postgres), default: h2
  type: h2
  # If h2 is used, the persistentVolume and PVC are used to store the database. Only for non-production environments.
  persistence:
    type: pvc
    enabled: true
    storageClassName: azure-disk-std-lrs
    ## (Optional) Use this to bind the claim to an existing PersistentVolume (PV) by name.
    volumeName: ""
    accessModes:
      - ReadWriteOnce
    size: 15Gi
    # annotations: {}
    finalizers:
      - kubernetes.io/pvc-protection
    # selectorLabels: {}
    ## Sub-directory of the PV to mount. Can be templated.
    # subPath: ""
    ## Name of an existing PVC. Can be templated.
    # existingClaim:
    ## Extra labels to apply to a PVC.
    extraPvcLabels: {}
    disableWarning: false

    ## If 'lookupVolumeName' is set to true, Helm will attempt to retrieve
    ## the current value of 'spec.volumeName' and incorporate it into the template.
    lookupVolumeName: true
  ## Specify file to store H2 database. You will also have to back this with a volume (cf. extraVolume and extraVolumeMounts)!
  # file:
  # encryptionKey: << YOUR ENCRYPTION KEY OR LEAVE BLANK AND USE EXISTING SECRET >>
  ## Only needed when you use mysql / postgres
  # host:
  # port:
  # dbname:
  # username:
  # password:
  ## Alternatively, use a connection URI for full configurability. Example for SSL-enabled Postgres:
  # connectionURI: postgres://<host>:<port>/<database>?user=<username>&password=<password>&ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory
  ## If a secret with the database credentials already exists, use the following values:
  # existingSecret:
  # existingSecretUsernameKey:
  # existingSecretPasswordKey:
  # existingSecretConnectionURIKey:
  # existingSecretEncryptionKeyKey:
  # existingSecretPortKey:
  # existingSecretHostKey:
  # existingSecretDatabaseNameKey:
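  ## A commented sketch of pointing Metabase at an in-cluster PostgreSQL instead of H2.
  ## The host and secret names below are hypothetical placeholders, not values this chart ships:
  # type: postgres
  # host: metabase-postgres.metabase.svc.cluster.local
  # port: 5432
  # dbname: metabase
  # existingSecret: metabase-db-secret
  # existingSecretUsernameKey: username
  # existingSecretPasswordKey: password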
  ## One or more Google Cloud SQL database instances can be made available to Metabase via the *Cloud SQL Auth proxy*.
  ## These can be used for Metabase's internal database (by specifying `host: localhost` and the port above), or as
  ## additional databases (configured at Admin → Databases). Workload Identity should be used for authentication, so
  ## that when `serviceAccount.create=true`, `serviceAccount.annotations` should contain:
  ##   iam.gke.io/gcp-service-account: your-gsa@email
  ## Ref: https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine
  googleCloudSQL:
    ## Found in Cloud Console "Cloud SQL Instance details" or using `gcloud sql instances describe INSTANCE_ID`
    ## example format: $project:$region:$instance=tcp:$port
    ## Each connection must have a unique TCP port.
    instanceConnectionNames: []
    ## Option to use a specific version of the *Cloud SQL Auth proxy* sidecar image.
    ## ref: https://console.cloud.google.com/gcr/images/cloudsql-docker/GLOBAL/gce-proxy
    # sidecarImageTag: latest
    ## ref: https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine#running_the_as_a_sidecar
    resources: {}
    securityContext:
      runAsNonRoot: true

password:
  # Changing Metabase password complexity:
  #   weak: no character constraints
  #   normal: at least 1 digit (default)
  #   strong: minimum 8 characters w/ 2 lowercase, 2 uppercase, 1 digit, and 1 special character
  complexity: normal
  length: 6

timeZone: UTC
emojiLogging: true
colorLogging: true
javaOpts: -Xmx1024m -Xms512m
# pluginsDirectory: /plugins
siteUrl: https://metabase.mathmast.com

session:
  {}
  # maxSessionAge:
  # sessionCookies:
  # cookieSameSite:

# Specify init containers, e.g. for module download
extraInitContainers: []
# - name: download-modules
#   image: "curlimages/curl:7.70.0"
#   imagePullPolicy: "IfNotPresent"
#   volumeMounts:
#     - name: plugins
#       mountPath: /plugins
#   workingDir: /plugins
#   command:
#     - "/bin/sh"
#     - "-ec"
#     - |
#       curl -Lso /plugins/athena.metabase-driver.jar \
#         https://github.com/dacort/metabase-athena-driver/releases/download/v1.1.0/athena.metabase-driver.jar

extraVolumeMounts: []
# - name: plugins
#   mountPath: /plugins
#   readOnly: false

extraVolumes: []
# - name: plugins
#   emptyDir: {}

livenessProbe:
  path: /api/health
  initialDelaySeconds: 120
  timeoutSeconds: 30
  failureThreshold: 6

readinessProbe:
  path: /api/health
  initialDelaySeconds: 30
  timeoutSeconds: 3
  periodSeconds: 5

service:
  name: metabase
  type: ClusterIP
  externalPort: 80
  internalPort: 3000
  # Used to fix NodePort when service.type: NodePort.
  nodePort:
  annotations:
    {}
    # Used to add custom annotations to the Service.
    # service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
  labels:
    {}
    # Used to add custom labels to the Service.
  loadBalancerSourceRanges: {}
  # Used to configure a static IP address
  loadBalancerIP:

ingress:
  enabled: true
  # The ingress class name, if you use multiple ingress controllers:
  className: nginx
  # Used to create an Ingress record (should be used with service.type: ClusterIP).
  hosts:
    - "metabase.mathmast.com"
    # - metabase.domain.com
  # The ingress path. Useful to host metabase on a subpath, such as `/metabase`.
  path: /
  pathType: Prefix
  labels:
    # Used to add custom labels to the Ingress
    # Useful if for example you have multiple Ingress controllers and want your Ingress controllers to bind to specific Ingresses
    # traffic: internal
  annotations:
    {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  tls:
    # Secrets must be manually created in the namespace.
    - secretName: metabase-mathmast-dot-com-tls
      hosts:
        - metabase.mathmast.com

route:
  enabled: false
  annotations:
    {}
    # haproxy.router.openshift.io/timeout: "60s"
  # host: ""
  path: ""
  wildcardPolicy: "None"
  tls:
    {}
    # termination: "Edge"
    # insecureEdgeTerminationPolicy: "Redirect"
    # key: ""
    # certificate: ""
    # caCertificate: ""
    # destinationCACertificate: ""

# A custom log4j2.xml file can be provided using a multiline YAML string.
# See https://github.com/metabase/metabase/blob/master/resources/log4j2.xml
#
# log4j2XML:

# DEPRECATED; a custom log4j.properties file can be provided using a multiline YAML string.
# See https://github.com/metabase/metabase/blob/master/resources/log4j.properties
#
# log4jProperties:

# The deployment strategy to use
# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/#DeploymentSpec
# strategy:
#   type: "Recreate"

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # autoMount is deprecated in favor of automountServiceAccountToken
  # If you want to disable auto mount of the Service Account token then you can set the value to false:
  # https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#opt-out-of-api-credential-automounting
  automountServiceAccountToken: false

resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits:
    cpu: "1"
    memory: 1Gi
  requests:
    cpu: "0.5"
    memory: 512Mi

# You can also opt out of automounting API credentials for a particular Pod:
# https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#opt-out-of-api-credential-automounting
automountServiceAccountToken: true

## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}

## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []

## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}

## Spread Constraints for pod assignment
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []

## PriorityClass for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority
# priorityClass: ""

## AWS Security Group Policy (EKS)
## ref: https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html
##
awsEKS:
  sgp:
    enabled: false
    # AWS Security Group IDs to attach to the pod
    # sgIds:
    #   - sg-abc123
    #   - sg-xyz456

extraEnv: []
# - name: MB_CHECK_FOR_UPDATES
#   value: false
# - name: MB_ADMIN_EMAIL
#   valueFrom:
#     configMapKeyRef:
#       name: metabase
#       key: email

envFrom:
  []
  # - type: secret
  #   name: metabase-secret
  # - type: configMap
  #   name: metabase-cm

securityContext: {}

sidecars:
  []
  # - name: busybox
  #   image: busybox
  #   ports:
  #     - containerPort: 80
  #       name: http
  #   resources:
  #     requests:
  #       memory: 100Mi
  #       cpu: 10m
  #     limits:
  #       memory: 100Mi
  #       cpu: 10m
  #   command: ["/bin/sh"]
  #   args: ["-c", "while true; do echo hello; sleep 10;done"]
|
||||
@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: schema-registry-kafka-auth
  namespace: freeleaps-data-platform
type: Opaque
data:
  # Password for freeleaps-user (the value below is the base64 encoding of: xzjo1UdA7xs9d3DDHZ0NgaRoBwdhxyBE)
  client-passwords: eHpqbzFVZEE3eHM5ZDNEREhaME5nYVJvQndkaHh5QkU=
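  # To regenerate the value (illustrative; plain shell, note -n to avoid a trailing newline):
  #   echo -n 'xzjo1UdA7xs9d3DDHZ0NgaRoBwdhxyBE' | base64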
@ -0,0 +1,788 @@
# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0

## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## e.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  defaultStorageClass: ""
  ## Security parameters
  ##
  security:
    ## @param global.security.allowInsecureImages Allows skipping image verification
    allowInsecureImages: false
  ## Compatibility adaptations for Kubernetes platforms
  ##
  compatibility:
    ## Compatibility adaptations for Openshift
    ##
    openshift:
      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
      ##
      adaptSecurityContext: auto
## @param kubeVersion Override Kubernetes version
##
kubeVersion: ""
## @section Common parameters
## @param nameOverride String to partially override common.names.fullname template with a string (will prepend the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname template with a string
##
fullnameOverride: ""
## @param namespaceOverride String to fully override common.names.namespace
##
namespaceOverride: ""
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param clusterDomain Kubernetes cluster domain name
##
clusterDomain: freeleaps.cluster
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @param usePasswordFiles Mount credentials as files instead of using environment variables
##
usePasswordFiles: true
## Enable diagnostic mode in the deployment
##
diagnosticMode:
  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
  ##
  enabled: false
  ## @param diagnosticMode.command Command to override all containers in the deployment
  ##
  command:
    - sleep
  ## @param diagnosticMode.args Args to override all containers in the deployment
  ##
  args:
    - infinity
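  ## A quick way to toggle this at deploy time (illustrative; assumes a release
  ## named "schema-registry" installed from this chart directory):
  ##   helm upgrade --install schema-registry . --set diagnosticMode.enabled=true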
## @section Schema Registry parameters
## Bitnami Schema Registry image
## ref: https://hub.docker.com/r/bitnami/schema-registry/tags/
## @param image.registry [default: REGISTRY_NAME] Schema Registry image registry
## @param image.repository [default: REPOSITORY_NAME/schema-registry] Schema Registry image repository
## @skip image.tag Schema Registry image tag (immutable tags are recommended)
## @param image.digest Schema Registry image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy Schema Registry image pull policy
## @param image.pullSecrets Schema Registry image pull secrets
## @param image.debug Enable image debug mode
##
image:
  registry: docker.io
  repository: bitnami/schema-registry
  tag: 8.0.0-debian-12-r4
  digest: ""
  ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## e.g:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Set to true if you would like to see extra information on logs
  ##
  debug: false
## @param command Override default container command (useful when using custom images)
##
command: []
## @param args Override default container args (useful when using custom images)
##
args: []
## @param automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param hostAliases Schema Registry pods host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param podLabels Extra labels for Schema Registry pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param configuration Specify content for schema-registry.properties. Auto-generated based on other parameters when not specified
##
## e.g:
## configuration: |-
##   listeners = http://0.0.0.0:8081
##   kafkastore.bootstrap.servers = protocol://broker_hostname:port
##   host.name = schema-registry
##   kafkastore.topic = _schemas
##   inter.instance.protocol = http
##   avro.compatibility.level = backward
##   debug = false
##
configuration: {}
## @param existingConfigmap Name of existing ConfigMap with Schema Registry configuration
## NOTE: When it's set, the configuration parameter is ignored
##
existingConfigmap: ""
## @param log4j Schema Registry Log4J Configuration (optional)
## Overwrites default log4j.properties file
##
log4j: {}
## @param existingLog4jConfigMap Name of existing ConfigMap containing a custom log4j.properties file.
## NOTE: When it's set, the log4j configuration is ignored
##
existingLog4jConfigMap: ""
## Authentication parameters
## https://github.com/bitnami/bitnami-docker-kafka#security
##
auth:
  ## TLS parameters to be used when a listener uses HTTPS
  ##
  tls:
    ## @param auth.tls.enabled Enable TLS configuration to be used when a listener uses HTTPS
    ##
    enabled: false
    ## @param auth.tls.jksSecret Existing secret containing the truststore and one keystore per Schema Registry replica
    ##
    ## Create this secret following the steps below:
    ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
    ## 2) Rename your truststore to `schema-registry.truststore.jks`.
    ## 3) Rename your keystores to `schema-registry-X.keystore.jks` where X is the ID of each Schema Registry replica
    ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create:
    ##    kubectl create secret generic SECRET_NAME --from-file=./schema-registry.truststore.jks --from-file=./schema-registry-0.keystore.jks --from-file=./schema-registry-1.keystore.jks ...
    ##
    jksSecret: ""
    ## @param auth.tls.keystorePassword Password to access the keystore when it's password-protected
    ##
    keystorePassword: ""
    ## @param auth.tls.truststorePassword Password to access the truststore when it's password-protected
    ##
    truststorePassword: ""
    ## @param auth.tls.clientAuthentication Client authentication configuration.
    ## Valid options: NONE, REQUESTED, or REQUIRED
    ##
    clientAuthentication: NONE
  ## Parameters to configure authentication with kafka brokers
  ##
  kafka:
    ## @param auth.kafka.jksSecret Existing secret containing the truststore and one keystore per Schema Registry replica
    ##
    ## Create this secret following the steps below:
    ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
    ## 2) Rename your truststore to `kafka.truststore.jks`.
    ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Schema Registry replica
    ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create:
    ##    kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ...
    ##
    jksSecret: ""
    ## @param auth.kafka.tlsEndpointIdentificationAlgorithm The endpoint identification algorithm used to validate broker hostnames
    ## Disable server hostname verification by setting it to an empty string
    ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings
    ##
    tlsEndpointIdentificationAlgorithm: https
    ## @param auth.kafka.keystorePassword Password to access the keystore when it's password-protected
    ##
    keystorePassword: ""
    ## @param auth.kafka.truststorePassword Password to access the truststore when it's password-protected
    ##
    truststorePassword: ""
    ## @param auth.kafka.saslMechanism Mechanism that Schema Registry will use to connect to Kafka. Allowed: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512
    ##
    saslMechanism: SCRAM-SHA-512
## @param listeners Comma-separated list of listeners that listen for API requests over either HTTP or HTTPS
##
listeners: http://0.0.0.0:8081
## @param avroCompatibilityLevel Avro compatibility type
## Valid options: none, backward, backward_transitive, forward, forward_transitive, full, or full_transitive
##
avroCompatibilityLevel: backward
## @param extraEnvVars Extra environment variables to be set on Schema Registry container
## e.g:
## extraEnvVars:
##   - name: FOO
##     value: BAR
##
extraEnvVars: []
## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Name of existing Secret containing extra env vars
##
extraEnvVarsSecret: ""
## @section Schema Registry statefulset parameters
## @param replicaCount Number of Schema Registry replicas to deploy.
##
replicaCount: 1
## @param updateStrategy.type Schema Registry statefulset strategy type
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
  ## StrategyType
  ## Can be set to RollingUpdate or OnDelete
  ##
  type: RollingUpdate
## Node affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ##
  type: ""
  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
  ##
  key: ""
  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param nodeSelector Node labels for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param podManagementPolicy Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
##
podManagementPolicy: OrderedReady
## @param podAnnotations Annotations for Schema Registry pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## @param priorityClassName Schema Registry pod priority class name
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/
##
priorityClassName: ""
## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
##
topologySpreadConstraints: {}
## @param schedulerName Name of the k8s scheduler (other than default) for Schema Registry pods
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param terminationGracePeriodSeconds Seconds a Schema Registry pod needs to terminate gracefully
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
terminationGracePeriodSeconds: ""
## @param lifecycleHooks for the Schema Registry container(s) to automate configuration before or after startup
##
lifecycleHooks: {}
## Schema Registry pods' Security Context.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param podSecurityContext.enabled Enabled Schema Registry pods' Security Context
## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param podSecurityContext.supplementalGroups Set filesystem extra groups
## @param podSecurityContext.fsGroup Set Schema Registry pod's Security Context fsGroup
## @param podSecurityContext.sysctls sysctl settings of the Schema Registry pods
##
podSecurityContext:
  enabled: true
  fsGroupChangePolicy: Always
  supplementalGroups: []
  fsGroup: 1001
  ## sysctl settings
  ## Example:
  ## sysctls:
  ##   - name: net.core.somaxconn
  ##     value: "10000"
  ##
  sysctls: []
## Schema Registry containers' Security Context (only main container).
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param containerSecurityContext.enabled Enabled containers' Security Context
## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param containerSecurityContext.privileged Set container's Security Context privileged
## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
##
containerSecurityContext:
  enabled: true
  seLinuxOptions: {}
  runAsUser: 1001
  runAsGroup: 1001
  runAsNonRoot: true
  privileged: false
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false
  capabilities:
    drop: ["ALL"]
  seccompProfile:
    type: "RuntimeDefault"
## Schema Registry containers' resource requests and limits.
## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## Upstream usually recommends leaving resources unset so this stays a conscious
## choice for the user (which also helps on small environments such as Minikube);
## in this deployment, explicit requests and limits are set under `resources` below.
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "small"
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
##   requests:
##     cpu: 2
##     memory: 512Mi
##   limits:
##     cpu: 3
##     memory: 1024Mi
##
resources:
  requests:
    cpu: 500m
    memory: 512Mi
  limits:
    cpu: "1"
    memory: 1Gi
## Schema Registry pods' liveness and readiness probes. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
## @param livenessProbe.enabled Enable livenessProbe
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 10
  timeoutSeconds: 1
  periodSeconds: 20
  failureThreshold: 6
  successThreshold: 1
## @param readinessProbe.enabled Enable readinessProbe
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
  enabled: true
  initialDelaySeconds: 10
  timeoutSeconds: 1
  periodSeconds: 20
  failureThreshold: 6
  successThreshold: 1
## @param startupProbe.enabled Enable startupProbe
## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param startupProbe.periodSeconds Period seconds for startupProbe
## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param startupProbe.failureThreshold Failure threshold for startupProbe
## @param startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
  enabled: false
  initialDelaySeconds: 10
  timeoutSeconds: 1
  periodSeconds: 5
  failureThreshold: 20
  successThreshold: 1
## @param customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @param customStartupProbe Custom startupProbe that overrides the default one
##
customStartupProbe: {}
## @param extraVolumes Optionally specify extra list of additional volumes for schema-registry pods
## e.g:
## extraVolumes:
##   - name: avro-properties
##     configMap:
##       name: avro-properties
##
extraVolumes: []
## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for schema-registry container(s)
## e.g:
## extraVolumeMounts:
##   - name: avro-properties
##     mountPath: /bitnami/schema-registry/etc/schema-registry/connect-avro-standalone.properties
##     subPath: connect-avro-standalone.properties
##
extraVolumeMounts: []
## @param initContainers Add additional init containers to the Schema Registry pods.
## e.g:
## initContainers:
##   - name: your-image-name
##     image: your-image
##     imagePullPolicy: Always
##     ports:
##       - name: portname
##         containerPort: 1234
##
initContainers: []
## @param sidecars Add additional sidecar containers to the Schema Registry pods.
## e.g:
## sidecars:
##   - name: your-image-name
##     image: your-image
##     imagePullPolicy: Always
##     ports:
##       - name: portname
##         containerPort: 1234
##
sidecars: []
## Schema Registry Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
## @param pdb.create Enable/disable a Pod Disruption Budget creation
## @param pdb.minAvailable Minimum number/percentage of pods that must still be available after the eviction
## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable after the eviction. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
##
pdb:
  create: true
  minAvailable: ""
  maxUnavailable: ""
## Schema Registry Autoscaling parameters.
## @param autoscaling.enabled Enable autoscaling for replicas
## @param autoscaling.minReplicas Minimum number of replicas
## @param autoscaling.maxReplicas Maximum number of replicas
## @param autoscaling.targetCPU Target CPU utilization percentage
## @param autoscaling.targetMemory Target Memory utilization percentage
##
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 11
  targetCPU: ""
  targetMemory: ""
  ## @param autoscaling.customPodMetrics allows you to set a list of custom metrics to trigger the scaling.
  ## e.g:
  ## customPodMetrics:
  ##   - name: "requests_per_second"
  ##     averageValue: "2000m"
  customPodMetrics: []
## @section Exposure Parameters
## Schema Registry Service parameters.
##
service:
  ## @param service.type Kubernetes service type
  ##
  type: ClusterIP
  ## @param service.ports.http Service HTTP port
  ##
  ports:
    http: 8081
  ## @param service.nodePorts.http Service HTTP node port
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  nodePorts:
    http: ""
  ## @param service.clusterIP Schema Registry service Cluster IP
  ## e.g:
  ## clusterIP: None
  ##
  clusterIP: ""
  ## @param service.externalTrafficPolicy Enable client source IP preservation
  ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
  ##
  externalTrafficPolicy: Cluster
  ## @param service.loadBalancerIP loadBalancerIP if service type is LoadBalancer
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  loadBalancerIP: ""
  ## @param service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
  ## e.g:
  ## loadBalancerSourceRanges:
  ##   - 10.10.10.0/24
  ##
  loadBalancerSourceRanges: []
  ## @param service.annotations Annotations for Schema Registry service
  ##
  annotations: {}
  ## @param service.labels Labels for Schema Registry service
  ##
  labels: {}
  ## @param service.extraPorts Extra ports to expose in Schema Registry service (normally used with the `sidecars` value)
  ##
  extraPorts: []
  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
  ## Values: ClientIP or None
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/
  ##
  sessionAffinity: None
  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
  ## sessionAffinityConfig:
  ##   clientIP:
  ##     timeoutSeconds: 300
  ##
  sessionAffinityConfig: {}
  ## Headless service properties
  ##
  headless:
    ## @param service.headless.annotations Annotations for the headless service.
    ##
    annotations: {}
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
  ##
  enabled: false
  ## @param networkPolicy.allowExternal Don't require client label for connections
  ## The Policy model to apply. When set to false, only pods with the correct
  ## client label will have network access to the ports the application is listening
  ## on. When true, the app will accept connections from any source
  ## (with the correct destination port).
  ##
  allowExternal: true
  ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
  ##
  allowExternalEgress: true
  ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
  ## e.g:
  ## extraIngress:
  ##   - ports:
  ##       - port: 1234
  ##     from:
  ##       - podSelector:
  ##           - matchLabels:
  ##               - role: frontend
  ##       - podSelector:
  ##           - matchExpressions:
  ##               - key: role
  ##                 operator: In
  ##                 values:
  ##                   - frontend
  extraIngress: []
  ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
  ## e.g:
  ## extraEgress:
  ##   - ports:
  ##       - port: 1234
  ##     to:
  ##       - podSelector:
  ##           - matchLabels:
  ##               - role: frontend
  ##       - podSelector:
  ##           - matchExpressions:
  ##               - key: role
  ##                 operator: In
  ##                 values:
  ##                   - frontend
  ##
  extraEgress: []
  ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
  ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
  ##
  ingressNSMatchLabels: {}
  ingressNSPodMatchLabels: {}
## Configure the ingress resource that allows you to access Schema Registry
##
ingress:
  ## @param ingress.enabled Enable ingress record generation for Schema Registry
  ##
  enabled: false
  ## @param ingress.pathType Ingress path type
  ##
  pathType: ImplementationSpecific
  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
  ##
  apiVersion: ""
  ## @param ingress.hostname Default host for the ingress record
  ##
  hostname: schema-registry.local
  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
  ##
  ingressClassName: ""
  ## @param ingress.path Default path for the ingress record
  ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
  ##
  path: /
  ## @param ingress.annotations Additional custom annotations for the ingress record
  ## NOTE: If `ingress.certManager=true`, annotation `kubernetes.io/tls-acme: "true"` will automatically be added
  ##
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
  ##
  annotations: {}
  ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
  ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
  ## You can:
  ##   - Use the `ingress.secrets` parameter to create this TLS secret
  ##   - Rely on cert-manager to create it by setting `ingress.certManager=true`
  ##   - Rely on Helm to create self-signed certificates by setting `ingress.tls=true` and `ingress.certManager=false`
  ##
  tls: false
  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
  ##
  selfSigned: false
  ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
  ## e.g:
  ## extraHosts:
  ##   - name: schema-registry.local
  ##     path: /
  ##
  extraHosts: []
  ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
  ## e.g:
  ## extraPaths:
  ##   - path: /*
  ##     backend:
  ##       serviceName: ssl-redirect
  ##       servicePort: use-annotation
  ##
  extraPaths: []
  ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## e.g:
  ## extraTls:
  ##   - hosts:
  ##       - schema-registry.local
  ##     secretName: schema-registry.local-tls
  ##
  extraTls: []
  ## @param ingress.secrets Custom TLS certificates as secrets
  ## NOTE: 'key' and 'certificate' are expected in PEM format
  ## NOTE: 'name' should line up with a 'secretName' set further up
  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ## e.g:
  ## secrets:
  ##   - name: schema-registry.local-tls
  ##     key: |-
  ##       -----BEGIN RSA PRIVATE KEY-----
  ##       ...
  ##       -----END RSA PRIVATE KEY-----
  ##     certificate: |-
  ##       -----BEGIN CERTIFICATE-----
  ##       ...
  ##       -----END CERTIFICATE-----
  ##
  secrets: []
  ## @param ingress.extraRules Additional rules to be covered with this ingress record
  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
  ## e.g:
  ## extraRules:
  ##   - host: schema-registry.local
  ##     http:
  ##       path: /
  ##       backend:
  ##         service:
  ##           name: schema-registry
  ##           port:
  ##             name: http
  ##
  extraRules: []
## @section RBAC parameters
## Schema Registry pods ServiceAccount.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## @param serviceAccount.create Enable the creation of a ServiceAccount for Schema Registry pods
  ##
  create: true
  ## @param serviceAccount.name Name of the created ServiceAccount to use
  ## If not set and create is true, a name is generated using the schema-registry.fullname template
  ##
  name: ""
  ## @param serviceAccount.annotations Additional Service Account annotations (evaluated as a template)
  ##
  annotations: {}
  ## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
  ##
  automountServiceAccountToken: false
## @section Kafka chart parameters
##
## Kafka chart configuration
## For information about these parameters, refer to:
## https://github.com/bitnami/charts/blob/main/bitnami/kafka/values.yaml
##
kafka:
  ## @param kafka.enabled Enable/disable Kafka chart installation
  ##
  enabled: false
  ## @param kafka.controller.replicaCount Number of Kafka controller-eligible (controller+broker) nodes
  ##
  controller:
    replicaCount: 1
  ## @param kafka.listeners.client.protocol Authentication protocol for communications with clients. Allowed protocols: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`
  ##
  listeners:
    client:
      protocol: "PLAINTEXT"
  ## @param kafka.service.ports.client Kafka svc port for client connections
  ##
  service:
    ports:
      client: 9092
  ## @param kafka.overrideConfiguration [object] Kafka common configuration override
  ##
  overrideConfiguration:
    offsets.topic.replication.factor: 1
  ## @param kafka.sasl.client.users Comma-separated list of usernames for Kafka client listener when SASL is enabled
  ## @param kafka.sasl.client.passwords Comma-separated list of passwords for client listener when SASL is enabled, must match the number of client.users
  ##
  sasl:
    client:
      users:
        - user
      passwords: ""
##
## External Kafka Configuration
## All of these values are only used when kafka.enabled is set to false
##
externalKafka:
  ## @param externalKafka.brokers Array of Kafka brokers to connect to. Format: protocol://broker_hostname:port
  ##
  brokers:
    - SASL_PLAINTEXT://freeleaps-kafka-cluster-kafka-bootstrap.freeleaps-data-platform.svc.freeleaps.cluster:9092
  ## @param externalKafka.listener.protocol Kafka listener protocol. Allowed protocols: PLAINTEXT, SASL_PLAINTEXT, SASL_SSL and SSL
  ##
  listener:
    protocol: SASL_PLAINTEXT
  ## Authentication parameters
  ## @param externalKafka.sasl.user User for SASL authentication
  ## @param externalKafka.sasl.password Password for SASL authentication
  ## @param externalKafka.sasl.existingSecret Name of the existing secret containing a password for SASL authentication (under the key named "client-passwords")
  ##
  sasl:
    user: freeleaps-user
    password: ""
    existingSecret: "schema-registry-kafka-auth"
@ -1,21 +0,0 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: zookeeper-vpa
  namespace: freeleaps-data-platform
spec:
  resourcePolicy:
    containerPolicies:
      - containerName: '*'
        controlledResources:
          - cpu
          - memory
        maxAllowed:
          cpu: 200m
          memory: 512Mi
  targetRef:
    apiVersion: apps/v1
    kind: StatefulSet
    name: zookeeper
  updatePolicy:
    updateMode: "Auto"
@ -0,0 +1,3 @@
# CODEOWNERS
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
* @dotdc
@ -0,0 +1,48 @@
name: Bug Report
description: File a bug report
title: "[bug] "
labels: ["bug"]
assignees:
  - dotdc
# Doc: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-issue-forms
body:
  - type: markdown
    attributes:
      value: |
        **Please make sure you don't accidentally share sensitive information while filing this issue.**

  - type: textarea
    attributes:
      label: Describe the bug
      description: A clear and concise description of what the bug is.
    validations:
      required: true

  - type: textarea
    attributes:
      label: How to reproduce?
      description: Steps to reproduce the bug.
      placeholder: |
        1. In this environment...
        2. With this config...
        3. Run '...'
        4. See error...
    validations:
      required: false

  - type: textarea
    attributes:
      label: Expected behavior
      description: A concise description of what you expected to happen.
    validations:
      required: false

  - type: textarea
    attributes:
      label: Additional context
      description: |
        Add any other context about the problem here.

        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
    validations:
      required: false
@ -0,0 +1,29 @@
name: Feature request
description: Suggest an idea for this project
title: "[enhancement] "
labels: ["enhancement"]
assignees:
  - dotdc
# Doc: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-issue-forms
body:
  - type: markdown
    attributes:
      value: |
        **Please make sure you don't accidentally share sensitive information while filing this issue.**

  - type: textarea
    attributes:
      label: Describe the enhancement you'd like
      description: A clear and concise description of what should be added to this project.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Additional context
      description: |
        Add any other context about your feature request here.

        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
    validations:
      required: false
@ -0,0 +1,21 @@
# Pull Request

## Required Fields

### :mag_right: What kind of change is it?

- fix | feat | docs | chore | ci

### :dart: What has been changed and why do we need it?

- ...

## Optional Fields

### :heavy_check_mark: Which issue(s) does this PR fix?

- ...

### :speech_balloon: Additional information?

- ...
@ -0,0 +1,33 @@
name: pre-commit-checks

# Doc: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
  # Allow this workflow to be manually triggered
  workflow_dispatch:

jobs:
  pre-commit-checks:
    runs-on: ubuntu-latest
    steps:
      # Doc: https://github.com/marketplace/actions/checkout
      - name: Checkout ${{ github.repository }}
        uses: actions/checkout@v4

      # Doc: https://github.com/marketplace/actions/setup-python
      - name: Setup Python
        uses: actions/setup-python@v5

      # Install pre-commit
      - name: Install pre-commit
        run: pip install pre-commit

      # Run pre-commit checks
      - name: Run pre-commit checks
        run: pre-commit run --all-files
@ -0,0 +1,34 @@
name: release

# Doc: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions

on:
  push:
    branches:
      - master
    paths:
      - 'dashboards/**/*.json'
  # Allow this workflow to be manually triggered
  workflow_dispatch:

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      # Doc: https://github.com/marketplace/actions/checkout
      - name: Checkout ${{ github.repository }}
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      # Doc: https://github.com/marketplace/actions/setup-node-js-environment
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20

      # Doc: https://github.com/semantic-release/semantic-release/blob/master/docs/recipes/ci-configurations/github-actions.md
      - name: Semantic Release
        env:
          GITHUB_TOKEN: ${{ secrets.PAT }}
        run: npx semantic-release
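        # Note: `secrets.PAT` is assumed to be a repository secret holding a
        # GitHub token with permission to create releases, per the
        # semantic-release recipe linked above.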
@ -0,0 +1,27 @@
# Doc: https://pre-commit.com

repos:
  # Default pre-commit hooks
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-json
      - id: check-yaml
      - id: check-added-large-files
      - id: check-merge-conflict
      - id: mixed-line-ending
      - id: trailing-whitespace

  # Typos
  - repo: https://github.com/crate-ci/typos
    rev: v1.29.4
    hooks:
      - id: typos

  # Markdown linter
  - repo: https://github.com/igorshubovych/markdownlint-cli
    rev: v0.43.0
    hooks:
      - id: markdownlint
        args:
          - "--disable=MD013"
@ -0,0 +1,10 @@
{
  "branches": [
    "master"
  ],
  "plugins": [
    "@semantic-release/commit-analyzer",
    "@semantic-release/release-notes-generator",
    "@semantic-release/github"
  ]
}
@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
  community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of
  any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
  without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at <david@0xdc.me>.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of
actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the
community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ].\
Translations are available at
[https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
@ -0,0 +1,20 @@
# Contributing Guidelines

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.

## Project Scope

This project aims to offer a set of modern Grafana dashboards for Kubernetes.\
Because setups can be very diverse, it is not possible to make these dashboards universal.\
Changes are welcome as long as they work with [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack).

## How to Contribute

- **Submit an issue:** to report a bug, share an idea, or open a discussion.
- **Submit a pull request:** to share a fix or a feature.

## Best practices

- Bump dashboard(s) version(s)
- [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) are preferred
- [Signed commits](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff) are preferred
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2020 David Calvert
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
@ -0,0 +1,388 @@
# grafana-dashboards-kubernetes <!-- omit in toc -->



## Table of contents <!-- omit in toc -->

- [Description](#description)
- [Releases](#releases)
- [Features](#features)
- [Dashboards](#dashboards)
- [Installation](#installation)
  - [Install manually](#install-manually)
  - [Install via grafana.com](#install-via-grafanacom)
  - [Install with ArgoCD](#install-with-argocd)
  - [Install with Helm values](#install-with-helm-values)
  - [Install as ConfigMaps](#install-as-configmaps)
  - [Install as ConfigMaps with Terraform](#install-as-configmaps-with-terraform)
  - [Install as GrafanaDashboard with Grafana Operator](#install-as-grafanadashboard-with-grafana-operator)
- [Known issue(s)](#known-issues)
  - [Broken panels due to a too-high resolution](#broken-panels-due-to-a-too-high-resolution)
  - [Broken panels on k8s-views-nodes when a node changes its IP address](#broken-panels-on-k8s-views-nodes-when-a-node-changes-its-ip-address)
  - [Broken panels on k8s-views-nodes due to the nodename label](#broken-panels-on-k8s-views-nodes-due-to-the-nodename-label)
- [Contributing](#contributing)

## Description

This repository contains a modern set of [Grafana](https://github.com/grafana/grafana) dashboards for [Kubernetes](https://github.com/kubernetes/kubernetes).\
They are inspired by many other dashboards from `kubernetes-mixin` and `grafana.com`.

You can find more information about them in my article: [A set of modern Grafana dashboards for Kubernetes](https://0xdc.me/blog/a-set-of-modern-grafana-dashboards-for-kubernetes/)

You can also download them on [Grafana.com](https://grafana.com/grafana/dashboards/?plcmt=top-nav&cta=downloads&search=dotdc).

## Releases

This repository follows [semantic versioning](https://semver.org) for releases.\
It relies on [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) to automate releases using [semantic-release](https://github.com/semantic-release/semantic-release).

## Features

These dashboards are made and tested for the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) chart, but they should work well with others as long as you have [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) and [prometheus-node-exporter](https://github.com/prometheus/node_exporter) installed on your Kubernetes cluster.

They are not backward compatible with older Grafana versions because they take advantage of Grafana's newest features, such as:

- `gradient mode`, introduced in Grafana 8.1 ([Grafana Blog post](https://grafana.com/blog/2021/09/10/new-in-grafana-8.1-gradient-mode-for-time-series-visualizations-and-dynamic-panel-configuration/))
- the `time series` visualization panel, introduced in Grafana 7.4 ([Grafana Blog post](https://grafana.com/blog/2021/02/10/how-the-new-time-series-panel-brings-major-performance-improvements-and-new-visualization-features-to-grafana-7.4/))
- the `$__rate_interval` variable, introduced in Grafana 7.2 ([Grafana Blog post](https://grafana.com/blog/2020/09/28/new-in-grafana-7.2-__rate_interval-for-prometheus-rate-queries-that-just-work/))

They also have a `Prometheus Datasource` variable, so they will work on a federated Grafana instance.

As an example, here's what the `Kubernetes / Views / Global` dashboard looks like:



## Dashboards

| File name | Description | Screenshot |
|:---------------------------|:------------|:----------:|
| k8s-addons-prometheus.json | Dashboard for Prometheus. | [LINK](https://raw.githubusercontent.com/dotdc/media/main/grafana-dashboards-kubernetes/k8s-addons-prometheus.png) |
| k8s-addons-trivy-operator.json | Dashboard for the Trivy Operator from Aqua Security. | [LINK](https://raw.githubusercontent.com/dotdc/media/main/grafana-dashboards-kubernetes/k8s-addons-trivy-operator.png) |
| k8s-system-api-server.json | Dashboard for the API Server Kubernetes component. | [LINK](https://raw.githubusercontent.com/dotdc/media/main/grafana-dashboards-kubernetes/k8s-system-api-server.png) |
| k8s-system-coredns.json | Dashboard for the CoreDNS Kubernetes component. | [LINK](https://raw.githubusercontent.com/dotdc/media/main/grafana-dashboards-kubernetes/k8s-system-coredns.png) |
| k8s-views-global.json | `Global` level view dashboard for Kubernetes. | [LINK](https://raw.githubusercontent.com/dotdc/media/main/grafana-dashboards-kubernetes/k8s-views-global.png) |
| k8s-views-namespaces.json | `Namespaces` level view dashboard for Kubernetes. | [LINK](https://raw.githubusercontent.com/dotdc/media/main/grafana-dashboards-kubernetes/k8s-views-namespaces.png) |
| k8s-views-nodes.json | `Nodes` level view dashboard for Kubernetes. | [LINK](https://raw.githubusercontent.com/dotdc/media/main/grafana-dashboards-kubernetes/k8s-views-nodes.png) |
| k8s-views-pods.json | `Pods` level view dashboard for Kubernetes. | [LINK](https://raw.githubusercontent.com/dotdc/media/main/grafana-dashboards-kubernetes/k8s-views-pods.png) |

## Installation

In most cases, you will need to clone this repository (or your fork):

```terminal
git clone https://github.com/dotdc/grafana-dashboards-kubernetes.git
cd grafana-dashboards-kubernetes
```

If you plan to deploy these dashboards using [ArgoCD](#install-with-argocd), [ConfigMaps](#install-as-configmaps) or [Terraform](#install-as-configmaps-with-terraform), you will also need to enable and configure the `dashboards sidecar` on the Grafana Helm chart to get the dashboards loaded into your Grafana instance:

```yaml
# kube-prometheus-stack values
grafana:
  sidecar:
    dashboards:
      enabled: true
      defaultFolderName: "General"
      label: grafana_dashboard
      labelValue: "1"
      folderAnnotation: grafana_folder
      searchNamespace: ALL
      provider:
        foldersFromFilesStructure: true
```

### Install manually

On the WebUI of your Grafana instance, hover over the `+` sign in the left menu, then click `Import`.\
Once you are on the Import page, upload the JSON files one by one from your local copy using the `Upload JSON file` button.

### Install via grafana.com

On the WebUI of your Grafana instance, hover over the `+` sign in the left menu, then click `Import`.\
Once you are on the Import page, enter the grafana.com dashboard ID (see table below) under `Import via grafana.com`, then click the `Load` button. Repeat for each dashboard.

Grafana.com dashboard ID list:

| Dashboard | ID |
|:-----------------------------------|:------|
| k8s-addons-prometheus.json | 19105 |
| k8s-addons-trivy-operator.json | 16337 |
| k8s-system-api-server.json | 15761 |
| k8s-system-coredns.json | 15762 |
| k8s-views-global.json | 15757 |
| k8s-views-namespaces.json | 15758 |
| k8s-views-nodes.json | 15759 |
| k8s-views-pods.json | 15760 |

### Install with ArgoCD

If you are using ArgoCD, this will deploy the dashboards in the default project of ArgoCD:

```terminal
kubectl apply -f argocd-app.yml
```

You will also need to enable and configure the Grafana `dashboards sidecar` as described in [Installation](#installation).

### Install with Helm values

If you use the official Grafana helm chart or kube-prometheus-stack, you can install the dashboards directly using the `dashboardProviders` & `dashboards` helm chart values.

Depending on your setup, add or merge the following example block into your helm chart values.\
The example is for [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack); for the official [Grafana helm chart](https://github.com/grafana/helm-charts/tree/main/charts/grafana), remove the first line (`grafana:`) and reduce the indentation level of the entire block.

```yaml
grafana:
  # Provision grafana-dashboards-kubernetes
  dashboardProviders:
    dashboardproviders.yaml:
      apiVersion: 1
      providers:
        - name: 'grafana-dashboards-kubernetes'
          orgId: 1
          folder: 'Kubernetes'
          type: file
          disableDeletion: true
          editable: true
          options:
            path: /var/lib/grafana/dashboards/grafana-dashboards-kubernetes
  dashboards:
    grafana-dashboards-kubernetes:
      k8s-system-api-server:
        url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-api-server.json
        token: ''
      k8s-system-coredns:
        url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-coredns.json
        token: ''
      k8s-views-global:
        url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-global.json
        token: ''
      k8s-views-namespaces:
        url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
        token: ''
      k8s-views-nodes:
        url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-nodes.json
        token: ''
      k8s-views-pods:
        url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
        token: ''
```
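
Once these values are merged, redeploying the chart provisions the dashboards. As a minimal sketch, assuming the `prometheus-community` Helm repository is already added and the release lives in a `monitoring` namespace (adjust the release name and values file to your setup):

```terminal
helm upgrade --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
  --namespace monitoring --create-namespace \
  -f values.yaml
```
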
### Install as ConfigMaps

Grafana dashboards can be provisioned as Kubernetes ConfigMaps if you configure the [dashboard sidecar](https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml#L667) available on the official [Grafana Helm Chart](https://github.com/grafana/helm-charts/tree/main/charts/grafana).

To build the ConfigMaps and output them on STDOUT:

```terminal
kubectl kustomize .
```

*Note: no namespace is set by default, you can change that in the `kustomization.yaml` file.*

To build and deploy them directly on your Kubernetes cluster:

```terminal
kubectl apply -k . -n monitoring
```

You will also need to enable and configure the Grafana `dashboards sidecar` as described in [Installation](#installation).

*Note: you can change the namespace if needed.*

### Install as ConfigMaps with Terraform

If you use Terraform to provision your Kubernetes resources, you can convert the generated ConfigMaps to Terraform code using [tfk8s](https://github.com/jrhouston/tfk8s).

To build and convert the ConfigMaps to Terraform code:

```terminal
kubectl kustomize . | tfk8s
```

You will also need to enable and configure the Grafana `dashboards sidecar` as described in [Installation](#installation).

*Note: no namespace is set by default, you can change that in the `kustomization.yaml` file.*
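
For reference, `tfk8s` emits one `kubernetes_manifest` resource (for the Terraform Kubernetes provider) per ConfigMap. A trimmed sketch of what the output might look like for a single dashboard; the resource name and JSON payload are abbreviated here and will vary with your dashboards:

```terraform
resource "kubernetes_manifest" "configmap_dashboards_k8s_views_global" {
  manifest = {
    "apiVersion" = "v1"
    "kind"       = "ConfigMap"
    "metadata" = {
      "name" = "dashboards-k8s-views-global"
      "labels" = {
        "grafana_dashboard" = "1"
      }
    }
    "data" = {
      # dashboard JSON omitted for brevity
      "k8s-views-global.json" = "{ ... }"
    }
  }
}
```
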
### Install as GrafanaDashboard with Grafana Operator

If you use Grafana Operator to provision your Grafana dashboards, you can use the following manifests.

Make sure to use your proper namespace.

```yaml
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
  name: k8s-system-api-server
  namespace: monitoring
spec:
  instanceSelector:
    matchLabels:
      dashboards: "grafana"
  url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-api-server.json"
---
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
  name: k8s-system-coredns
  namespace: monitoring
spec:
  instanceSelector:
    matchLabels:
      dashboards: "grafana"
  url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-coredns.json"
---
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
  name: k8s-views-global
  namespace: monitoring
spec:
  instanceSelector:
    matchLabels:
      dashboards: "grafana"
  url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-global.json"
---
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
  name: k8s-views-namespaces
  namespace: monitoring
spec:
  instanceSelector:
    matchLabels:
      dashboards: "grafana"
  url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json"
---
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
  name: k8s-views-nodes
  namespace: monitoring
spec:
  instanceSelector:
    matchLabels:
      dashboards: "grafana"
  url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-nodes.json"
---
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
  name: k8s-views-pods
  namespace: monitoring
spec:
  instanceSelector:
    matchLabels:
      dashboards: "grafana"
  url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json"
```

## Known issue(s)

### Broken panels due to a too-high resolution

A user reported in [#50](https://github.com/dotdc/grafana-dashboards-kubernetes/issues/50) that some panels were broken because the default value of the `$resolution` variable was too low. The root cause hasn't been identified precisely, but the user was running Grafana Agent & Grafana Mimir. Changing the `$resolution` variable to a higher value (a lower resolution) will likely solve the issue.\
To make the fix permanent, set the `Scrape interval` of your Grafana Datasource to a value that works for your setup.

### Broken panels on k8s-views-nodes when a node changes its IP address

To make this dashboard more convenient, there's a small variable hack to display `node` instead of `instance`.\
Because of that, some panels can lack data when a node changes its IP address, as reported in [#102](https://github.com/dotdc/grafana-dashboards-kubernetes/issues/102).

There is no easy fix for this scenario yet, but it should be a corner case for most people.\
Feel free to reopen the issue if you have ideas to fix this.

### Broken panels on k8s-views-nodes due to the nodename label

The `k8s-views-nodes` dashboard will have many broken panels if the `node` label from `kube_node_info` doesn't match the `nodename` label from `node_uname_info`.

This situation can happen on certain deployments of the node exporter running inside Kubernetes (e.g. via a `DaemonSet`), where `nodename` takes a different value than the node name as understood by the Kubernetes API.

Below are some ways to relabel the metric and force the `nodename` label to the appropriate value, depending on how the collection agent is deployed:

#### Directly through the Prometheus configuration file <!-- omit in toc -->

Assuming the node exporter job is defined through `kubernetes_sd_config`, you can take advantage of the internal discovery labels and fix this by adding the following relabeling rule to the job:

```yaml
# File: prometheus.yaml
scrape_configs:
  - job_name: node-exporter
    relabel_configs:
      # Add this
      - action: replace
        source_labels: [__meta_kubernetes_pod_node_name]
        target_label: nodename
```

#### Through a `ServiceMonitor` <!-- omit in toc -->

If you are using the Prometheus operator or the Grafana agent in operator mode, the scrape job should instead be configured via a `ServiceMonitor`, which will dynamically edit the Prometheus configuration file. In that case, the relabeling has a slightly different syntax:

```yaml
# File: service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
spec:
  endpoints:
    - port: http-metrics
      relabelings:
        # Add this
        - action: replace
          sourceLabels: [__meta_kubernetes_node_name]
          targetLabel: nodename
```

As a convenience, if you are using the [kube-prometheus-stack helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), this rule can be specified directly in your values.yaml:

```yaml
# File: kube-prometheus-stack-values.yaml
prometheus-node-exporter:
  prometheus:
    monitor:
      relabelings:
        - action: replace
          sourceLabels: [__meta_kubernetes_pod_node_name]
          targetLabel: nodename
```

#### With Grafana Agent Flow mode <!-- omit in toc -->

The Grafana Agent can [bundle its own node_exporter](https://grafana.com/docs/agent/v0.33/flow/reference/components/prometheus.exporter.unix/). In that case, relabeling can be done this way:

```river
prometheus.exporter.unix {
}

prometheus.scrape "node_exporter" {
  targets    = prometheus.exporter.unix.targets
  forward_to = [prometheus.relabel.node_exporter.receiver]

  job_name = "node-exporter"
}

prometheus.relabel "node_exporter" {
  forward_to = [prometheus.remote_write.sink.receiver]

  rule {
    replacement  = env("HOSTNAME")
    target_label = "nodename"
  }

  rule {
    # The default job name is "integrations/node_exporter" and needs to be replaced
    replacement  = "node-exporter"
    target_label = "job"
  }
}
```

The `HOSTNAME` environment variable is injected by default by the [Grafana Agent helm chart](https://github.com/grafana/agent/blob/93cb1a2718f6fc90fd06ef33b6bcff519dbed662/operations/helm/charts/grafana-agent/templates/containers/_agent.yaml#L25).

## Contributing

Feel free to contribute to this project:

- Give a GitHub ⭐ if you like it.
- Create an [Issue](https://github.com/dotdc/grafana-dashboards-kubernetes/issues) to make a feature request, report a bug or share an idea.
- Create a [Pull Request](https://github.com/dotdc/grafana-dashboards-kubernetes/pulls) if you want to share code or anything useful to this project.
@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: grafana-dashboards-kubernetes
  namespace: argocd
  labels:
    app.kubernetes.io/name: grafana-dashboards-kubernetes
    app.kubernetes.io/version: HEAD
    app.kubernetes.io/managed-by: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default # You may need to change this!
  source:
    path: ./
    repoURL: https://github.com/dotdc/grafana-dashboards-kubernetes
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: monitoring
  syncPolicy:
    ## https://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - Replace=true
@ -0,0 +1 @@
*.json
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,43 @@
# Global options
generatorOptions:
  disableNameSuffixHash: true
  labels:
    grafana_dashboard: "1"
commonAnnotations:
  grafana_folder: "Kubernetes"
#namespace: monitoring

# Generate a ConfigMap for each dashboard
configMapGenerator:

  #################################################
  # Views Dashboards
  #################################################

  - name: dashboards-k8s-views-global
    files: [ dashboards/k8s-views-global.json ]

  - name: dashboards-k8s-views-namespaces
    files: [ dashboards/k8s-views-namespaces.json ]

  - name: dashboards-k8s-views-nodes
    files: [ dashboards/k8s-views-nodes.json ]

  - name: dashboards-k8s-views-pods
    files: [ dashboards/k8s-views-pods.json ]

  #################################################
  # System / Addons Dashboards
  #################################################

  - name: dashboards-k8s-system-api-server
    files: [ dashboards/k8s-system-api-server.json ]

  - name: dashboards-k8s-system-coredns
    files: [ dashboards/k8s-system-coredns.json ]

  - name: dashboards-k8s-addons-prometheus
    files: [ dashboards/k8s-addons-prometheus.json ]

  - name: dashboards-k8s-addons-trivy-operator
    files: [ dashboards/k8s-addons-trivy-operator.json ]
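
For orientation, running `kubectl kustomize .` against this file renders one ConfigMap per `configMapGenerator` entry, with the label from `generatorOptions` and the folder annotation from `commonAnnotations` applied. A trimmed sketch of the first generated object (the dashboard JSON body is omitted):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-k8s-views-global
  labels:
    grafana_dashboard: "1"
  annotations:
    grafana_folder: "Kubernetes"
data:
  k8s-views-global.json: |
    { ... }
```
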
@ -0,0 +1,67 @@
#!/bin/bash

set -euo pipefail

# Default version if not provided
DEFAULT_VERSION="2.8.2"
VERSION=${1:-$DEFAULT_VERSION}

# Base directory paths
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DASHBOARDS_DIR="$SCRIPT_DIR/$VERSION/dashboards"
OUTPUT_DIR="$SCRIPT_DIR"

# Validate version directory exists
if [[ ! -d "$DASHBOARDS_DIR" ]]; then
    echo "Error: Version directory '$DASHBOARDS_DIR' does not exist"
    echo "Available versions:"
    ls -1 "$SCRIPT_DIR" | grep -E '^[0-9]+\.[0-9]+\.[0-9]+$' || echo "  No version directories found"
    exit 1
fi

# Validate dashboards exist
if [[ -z "$(ls -A "$DASHBOARDS_DIR"/*.json 2>/dev/null)" ]]; then
    echo "Error: No JSON dashboard files found in '$DASHBOARDS_DIR'"
    exit 1
fi

echo "Generating ConfigMaps from dashboard files in version $VERSION..."

# Process each JSON file in the dashboards directory
for json_file in "$DASHBOARDS_DIR"/*.json; do
    if [[ ! -f "$json_file" ]]; then
        continue
    fi

    # Extract filename without extension
    filename=$(basename "$json_file" .json)

    # Generate ConfigMap name by replacing underscores with hyphens
    configmap_name="${filename//_/-}-dashboard"

    # Output YAML file path
    yaml_file="$OUTPUT_DIR/$filename.yaml"

    echo "Processing: $json_file -> $yaml_file"

    # Generate ConfigMap YAML header
    cat > "$yaml_file" << EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: $configmap_name
  namespace: freeleaps-monitoring-system
  labels:
    grafana_dashboard: "1"
data:
  $filename.json: |-
EOF

    # Add JSON content, indented to sit under the |- block scalar
    sed 's/^/    /' "$json_file" >> "$yaml_file"

    echo "Generated: $yaml_file"
done

echo "ConfigMap generation completed for version $VERSION"
echo "Generated files are located in: $OUTPUT_DIR"
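
A possible invocation, assuming the script above is saved as `generate-configmaps.sh` (hypothetical name) next to the versioned dashboard directories:

```terminal
# Generate ConfigMap YAMLs for a specific version directory
./generate-configmaps.sh 2.8.2

# Apply one of the generated manifests
kubectl apply -f k8s-views-global.yaml
```
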
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -14,3 +14,5 @@ grafana,https://grafana.github.io/helm-charts,force-update
fluent,https://fluent.github.io/helm-charts,force-update
pinot,https://raw.githubusercontent.com/apache/pinot/master/helm,force-update
starrocks,https://starrocks.github.io/starrocks-kubernetes-operator,force-update
kafbat-ui,https://kafbat.github.io/helm-charts,force-update
pmint93,https://pmint93.github.io/helm-charts,force-update
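
Each `name,url,force-update` row appears to map onto a `helm repo add` invocation; for the two new entries, that would be roughly:

```terminal
helm repo add kafbat-ui https://kafbat.github.io/helm-charts --force-update
helm repo add pmint93 https://pmint93.github.io/helm-charts --force-update
```
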
@ -16,7 +16,7 @@ chat:
registry: docker.io
repository: null
name: chat
tag: snapshot-9c7bb61
tag: snapshot-4c13092
imagePullPolicy: IfNotPresent
ports:
- name: http

@ -16,7 +16,7 @@ freeleaps:
registry: docker.io
repository: null
name: backend
tag: snapshot-9c7bb61
tag: snapshot-4c13092
imagePullPolicy: IfNotPresent
ports:
- name: http

@ -8,7 +8,7 @@ frontend:
registry: docker.io
repository: null
name: frontend
tag: snapshot-9c7bb61
tag: snapshot-4c13092
imagePullPolicy: IfNotPresent
ports:
- name: http