## Compatibility adaptations for Kubernetes platforms
##
compatibility:
## Compatibility adaptations for OpenShift
##
openshift:
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with OpenShift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is OpenShift), force (perform the adaptation always), disabled (do not perform adaptation)
## Set to true if you would like to see extra information on logs
##
debug: false
## @param clusterId Kafka Kraft cluster ID (ignored if existingKraftSecret is set). A random cluster ID will be generated the 1st time Kraft is initialized if not set.
## NOTE: Already initialized Kafka nodes will use cluster ID stored in their persisted storage.
## If reusing existing PVCs, make sure the cluster ID is set matching the stored cluster ID, otherwise new nodes will fail to join the cluster.
## In case the cluster ID stored in the secret does not match the value stored in /bitnami/kafka/data/meta.properties, remove the secret and upgrade the chart setting the correct value.
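## e.g. a valid cluster ID can be generated with the Kafka tooling shipped in the container (illustrative):
##   /opt/bitnami/kafka/bin/kafka-storage.sh random-uuid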
##
clusterId:"freeleaps"
## @param existingKraftSecret Name of the secret containing the Kafka KRaft Cluster ID and one directory ID per controller replica
##
existingKraftSecret:""
## @param config Specify content for Kafka configuration (auto-generated based on other parameters otherwise)
## NOTE: This will override the configuration based on values, please act carefully
## Use simple key-value YAML format, then it's transformed to properties format by the chart. e.g:
## process.roles: broker
## ... will be transformed to:
## process.roles=broker
##
config: {}
## @param overrideConfiguration Kafka common configuration override. Values defined here take precedence over the ones defined at `config`
##
overrideConfiguration: {}
## @param existingConfigmap Name of an existing ConfigMap with the Kafka configuration
##
existingConfigmap: ""
## @param secretConfig Additional configuration to be appended at the end of the generated Kafka configuration (store in a secret)
##
secretConfig: ""
## @param existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka configuration
## The key for the configuration should be: server-secret.properties
## NOTE: This will override secretConfig value
##
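## e.g. such a secret can be created as follows (the secret name is illustrative):
##   kubectl create secret generic my-kafka-secret-config \
##     --from-file=server-secret.properties=./server-secret.properties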
existingSecretConfig: ""
## @param log4j2 Specify content for Kafka log4j2 configuration (default one is used otherwise)
## @param listeners.client.name Name for the Kafka client listener
## @param listeners.client.containerPort Port for the Kafka client listener
## @param listeners.client.protocol Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL'
## @param listeners.client.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required'
client:
  containerPort: 9092
  protocol: SASL_PLAINTEXT
  name: CLIENT
  sslClientAuth: ""
## @param listeners.controller.name Name for the Kafka controller listener
## @param listeners.controller.containerPort Port for the Kafka controller listener
## @param listeners.controller.protocol Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL'
## @param listeners.controller.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required'
## @param listeners.interbroker.name Name for the Kafka inter-broker listener
## @param listeners.interbroker.containerPort Port for the Kafka inter-broker listener
## @param listeners.interbroker.protocol Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL'
## @param listeners.interbroker.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required'
interbroker:
  containerPort: 9094
  protocol: SASL_PLAINTEXT
  name: INTERNAL
  sslClientAuth: ""
## @param listeners.external.containerPort Port for the Kafka external listener
## @param listeners.external.protocol Security protocol for the Kafka external listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL'
## @param listeners.external.name Name for the Kafka external listener
## @param listeners.external.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS authentication type. If SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required'
external:
  containerPort: 9095
  protocol: SASL_PLAINTEXT
  name: EXTERNAL
  sslClientAuth: ""
## @param listeners.extraListeners Array of listener objects to be appended to already existing listeners
## E.g.
## extraListeners:
## - name: CUSTOM
## containerPort: 9097
## protocol: SASL_PLAINTEXT
## sslClientAuth: ""
##
extraListeners: []
## NOTE: If set, below values will override configuration set using the above values (extraListeners.*, controller.*, interbroker.*, client.* and external.*)
## @param listeners.overrideListeners Overrides the Kafka 'listeners' configuration setting.
## @param listeners.advertisedListeners Overrides the Kafka 'advertised.listeners' configuration setting.
## @param listeners.securityProtocolMap Overrides the Kafka 'listener.security.protocol.map' configuration setting.
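## e.g. (illustrative, following Kafka's listener syntax with this chart's default listener names):
## overrideListeners: "CLIENT://:9092,INTERNAL://:9094,CONTROLLER://:9093"
## advertisedListeners: "CLIENT://kafka.example.com:9092,INTERNAL://kafka-0.kafka-headless:9094"
## securityProtocolMap: "CLIENT:SASL_PLAINTEXT,INTERNAL:SASL_PLAINTEXT,CONTROLLER:SASL_PLAINTEXT"
##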
overrideListeners: ""
advertisedListeners: ""
securityProtocolMap: ""
## @section Kafka SASL parameters
## Kafka SASL settings for authentication, required if SASL_PLAINTEXT or SASL_SSL listeners are configured
##
sasl:
## @param sasl.enabledMechanisms Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `OAUTHBEARER`
## NOTE: At the moment, Kafka KRaft mode does not support SCRAM, which is why only PLAIN is configured.
## @param sasl.interBrokerMechanism SASL mechanism for inter-broker communication.
##
interBrokerMechanism: PLAIN
## @param sasl.controllerMechanism SASL mechanism for controller communications.
##
controllerMechanism: PLAIN
## Settings for OAuthBearer mechanism
## @param sasl.oauthbearer.tokenEndpointUrl The URL for the OAuth/OIDC identity provider
## @param sasl.oauthbearer.jwksEndpointUrl The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved
## @param sasl.oauthbearer.expectedAudience The comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences
## @param sasl.oauthbearer.subClaimName The OAuth claim name for the subject.
##
oauthbearer:
  tokenEndpointUrl: ""
  jwksEndpointUrl: ""
  expectedAudience: ""
  subClaimName: "sub"
## Credentials for inter-broker communications.
## @param sasl.interbroker.user Username for inter-broker communications when SASL is enabled
## @param sasl.interbroker.password Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated.
## @param sasl.interbroker.clientId Client ID for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER
## @param sasl.interbroker.clientSecret Client Secret for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the inter-broker listener, a random secret will be generated.
##
interbroker:
  user: inter_broker_user
  password: "r6Y@QTb*7BQN@hDGsN"
  clientId: inter_broker_client
  clientSecret: ""
## Credentials for controller communications.
## @param sasl.controller.user Username for controller communications when SASL is enabled
## @param sasl.controller.password Password for controller communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated.
## @param sasl.controller.clientId Client ID for controller communications when SASL is enabled with mechanism OAUTHBEARER
## @param sasl.controller.clientSecret Client Secret for controller communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the controller listener, a random secret will be generated.
##
controller:
  user: controller_user
  password: "r6Y@QTb*7BQN@hDGsN"
  clientId: controller_broker_client
  clientSecret: ""
## Credentials for client communications.
## @param sasl.client.users Comma-separated list of usernames for client communications when SASL is enabled
## @param sasl.client.passwords Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users
##
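## e.g:
## users:
##   - user1
##   - user2
## passwords: "password1,password2"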
client:
  users:
    - freeleaps
  passwords: "r6Y@QTb*7BQN@hDGsN"
## @param sasl.existingSecret Name of the existing secret containing credentials for client.users, interbroker.user and controller.user
## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create:
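##   (illustrative sketch; the secret key names below are assumptions based on the credentials listed above)
##   kubectl create secret generic SECRET_NAME \
##     --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 \
##     --from-literal=inter-broker-password=INTER_BROKER_PASSWORD \
##     --from-literal=controller-password=CONTROLLER_PASSWORD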
## The client secrets are only required when using OAuthBearer as SASL mechanism.
## Client, inter-broker and controller passwords are only required if the SASL mechanism includes something other than OAuthBearer.
##
existingSecret: ""
## @section Kafka TLS parameters
## Kafka TLS settings, required if SSL or SASL_SSL listeners are configured
##
tls:
## @param tls.type Format to use for TLS certificates. Allowed types: `JKS` and `PEM`
##
type: JKS
## @param tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert.
## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA.
##
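## e.g. a bundled PEM file is ordered leaf-first (illustrative):
##   -----BEGIN CERTIFICATE-----    <- leaf/endpoint certificate
##   -----BEGIN CERTIFICATE-----    <- intermediate CA certificate(s), top-most last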
pemChainIncluded: false
## @param tls.autoGenerated.enabled Enable automatic generation of TLS certificates (only supported if `tls.type` is `PEM`)
## @param tls.autoGenerated.engine Mechanism to generate the certificates (allowed values: helm, cert-manager)
## @param tls.autoGenerated.customAltNames List of additional subject alternative names (SANs) for the automatically generated TLS certificates.
## @param tls.autoGenerated.certManager.existingIssuer The name of an existing Issuer to use for generating the certificates (only for `cert-manager` engine)
## @param tls.autoGenerated.certManager.existingIssuerKind Existing Issuer kind, defaults to Issuer (only for `cert-manager` engine)
## @param tls.autoGenerated.certManager.keyAlgorithm Key algorithm for the certificates (only for `cert-manager` engine)
## @param tls.autoGenerated.certManager.keySize Key size for the certificates (only for `cert-manager` engine)
## @param tls.autoGenerated.certManager.duration Duration for the certificates (only for `cert-manager` engine)
## @param tls.autoGenerated.certManager.renewBefore Renewal period for the certificates (only for `cert-manager` engine)
## NOTE: Alternatively, a single keystore can be provided for all nodes under the key 'kafka.keystore.jks', this keystore will be used by all nodes unless overridden by the 'kafka-<role>-X.keystore.jks' file
##
## When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key.
## Create these secrets following the steps below:
## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA
## 2) Rename your CA file to `ca.crt`.
## 3) Rename your certificates to `kafka-X.tls.crt` where X is the ID of each Kafka broker.
## 4) Rename your keys to `kafka-X.tls.key` where X is the ID of each Kafka broker.
## 5) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):
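##    (illustrative sketch, using the file names produced in the steps above)
##    kubectl create secret generic SECRET_NAME_X --from-file=ca.crt=./ca.crt \
##      --from-file=kafka-X.tls.crt=./kafka-X.tls.crt --from-file=kafka-X.tls.key=./kafka-X.tls.key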
## NOTE: Alternatively, a single key and certificate can be provided for all nodes under the keys 'tls.crt' and 'tls.key'. These certificates will be used by all nodes unless overridden by the 'kafka-<role>-X.key' and 'kafka-<role>-X.crt' files
##
existingSecret: ""
## @param tls.passwordsSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`)
##
passwordsSecret: ""
## @param tls.passwordsSecretKeystoreKey The secret key from the tls.passwordsSecret containing the password for the Keystore.
##
passwordsSecretKeystoreKey: keystore-password
## @param tls.passwordsSecretTruststoreKey The secret key from the tls.passwordsSecret containing the password for the Truststore.
##
passwordsSecretTruststoreKey: truststore-password
## @param tls.passwordsSecretPemPasswordKey The secret key from the tls.passwordsSecret containing the password for the PEM key inside 'tls.passwordsSecret'.
##
passwordsSecretPemPasswordKey: ""
## @param tls.keystorePassword Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided.
## When using tls.type=PEM, the generated keystore will use this password or randomly generate one.
##
keystorePassword: ""
## @param tls.truststorePassword Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided.
## When using tls.type=PEM, the generated truststore will use this password or randomly generate one.
##
truststorePassword: ""
## @param tls.keyPassword Password to access the PEM key when it is password-protected.
## Note: ignored when using 'tls.passwordsSecret'
##
keyPassword: ""
## @param tls.jksKeystoreKey The secret key from the `tls.existingSecret` containing the keystore
## Note: ignored when using 'pem' format for certificates.
##
jksKeystoreKey: ""
## @param tls.jksTruststoreSecret Name of an existing secret containing your truststore, if it is not included in `tls.existingSecret` or differs from it
## Note: ignored when using 'pem' format for certificates.
##
jksTruststoreSecret: ""
## @param tls.jksTruststoreKey The secret key from the `tls.existingSecret` or `tls.jksTruststoreSecret` containing the truststore
## Note: ignored when using 'pem' format for certificates.
##
jksTruststoreKey: ""
## @param tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate
## Disable server host name verification by setting it to an empty string.
## @skip defaultInitContainers.volumePermissions.image.tag "volume-permissions" init-containers' image tag (immutable tags are recommended)
## @param defaultInitContainers.volumePermissions.image.digest "volume-permissions" init-containers' image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param defaultInitContainers.volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "volume-permissions" init-containers
## @param defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser Set runAsUser in "volume-permissions" init-containers' Security Context
## @param defaultInitContainers.volumePermissions.containerSecurityContext.privileged Set privileged in "volume-permissions" init-containers' Security Context
## @param defaultInitContainers.volumePermissions.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "volume-permissions" init-containers' Security Context
## @param defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.add List of capabilities to be added in "volume-permissions" init-containers
## @param defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "volume-permissions" init-containers
## @param defaultInitContainers.volumePermissions.containerSecurityContext.seccompProfile.type Set seccomp profile in "volume-permissions" init-containers
## @param defaultInitContainers.volumePermissions.resourcesPreset Set Kafka "volume-permissions" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.volumePermissions.resources is set (defaultInitContainers.volumePermissions.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param defaultInitContainers.volumePermissions.resources Set Kafka "volume-permissions" init container requests and limits for different resources like CPU or memory (essential for production workloads)
## E.g:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## Kafka "prepare-config" init container
## Used to prepare the Kafka configuration files for main containers to use them
## @param defaultInitContainers.prepareConfig.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "prepare-config" init-containers
## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsUser Set runAsUser in "prepare-config" init-containers' Security Context
## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsGroup Set runAsGroup in "prepare-config" init-containers' Security Context
## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "prepare-config" init-containers' Security Context
## @param defaultInitContainers.prepareConfig.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "prepare-config" init-containers' Security Context
## @param defaultInitContainers.prepareConfig.containerSecurityContext.privileged Set privileged in "prepare-config" init-containers' Security Context
## @param defaultInitContainers.prepareConfig.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "prepare-config" init-containers' Security Context
## @param defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.add List of capabilities to be added in "prepare-config" init-containers
## @param defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "prepare-config" init-containers
## @param defaultInitContainers.prepareConfig.containerSecurityContext.seccompProfile.type Set seccomp profile in "prepare-config" init-containers
## @param defaultInitContainers.prepareConfig.resourcesPreset Set Kafka "prepare-config" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.prepareConfig.resources is set (defaultInitContainers.prepareConfig.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param defaultInitContainers.prepareConfig.resources Set Kafka "prepare-config" init container requests and limits for different resources like CPU or memory (essential for production workloads)
## E.g:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources: {}
## @param defaultInitContainers.prepareConfig.extraInit Additional content for the "prepare-config" init script, rendered as a template.
##
extraInit: ""
## 'auto-discovery' init container
## Used to auto-detect LB IPs or node ports by querying the K8s API
## Note: RBAC might be required
##
autoDiscovery:
## @param defaultInitContainers.autoDiscovery.enabled Enable init container that auto-detects external IPs/ports by querying the K8s API
## @skip defaultInitContainers.autoDiscovery.image.tag "auto-discovery" init-containers' image tag (immutable tags are recommended)
## @param defaultInitContainers.autoDiscovery.image.digest "auto-discovery" init-containers' image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "auto-discovery" init-containers
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.runAsUser Set runAsUser in "auto-discovery" init-containers' Security Context
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.runAsGroup Set runAsGroup in "auto-discovery" init-containers' Security Context
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "auto-discovery" init-containers' Security Context
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "auto-discovery" init-containers' Security Context
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.privileged Set privileged in "auto-discovery" init-containers' Security Context
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "auto-discovery" init-containers' Security Context
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.capabilities.add List of capabilities to be added in "auto-discovery" init-containers
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "auto-discovery" init-containers
## @param defaultInitContainers.autoDiscovery.containerSecurityContext.seccompProfile.type Set seccomp profile in "auto-discovery" init-containers
##
containerSecurityContext:
  enabled: true
  seLinuxOptions: {}
  runAsUser: 1001
  runAsGroup: 1001
  runAsNonRoot: true
  readOnlyRootFilesystem: true
  privileged: false
  allowPrivilegeEscalation: false
  capabilities:
    add: []
    drop: ["ALL"]
  seccompProfile:
    type: "RuntimeDefault"
## Kafka "auto-discovery" init container resource requests and limits
## @param defaultInitContainers.autoDiscovery.resourcesPreset Set Kafka "auto-discovery" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.autoDiscovery.resources is set (defaultInitContainers.autoDiscovery.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "nano"
## @param defaultInitContainers.autoDiscovery.resources Set Kafka "auto-discovery" init container requests and limits for different resources like CPU or memory (essential for production workloads)
## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes.
##
controllerOnly: false
## @param controller.quorumBootstrapServers Override the Kafka controller quorum bootstrap servers of the Kafka Kraft cluster. If not set, it will be automatically configured to use all controller-eligible nodes.
##
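## e.g. (illustrative, assuming the default controller headless service naming):
## quorumBootstrapServers: "kafka-controller-0.kafka-controller-headless.default.svc.cluster.local:9093,kafka-controller-1.kafka-controller-headless.default.svc.cluster.local:9093"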
quorumBootstrapServers: ""
## @param controller.minId Minimal node.id values for controller-eligible nodes. Do not change after first initialization.
## Broker-only nodes increment their ID starting at this minimal value.
## We recommend setting this value high enough, as IDs under this value will be used by controller-eligible nodes
##
minId: 0
## @param controller.config Specify content for Kafka configuration for Kafka controller-eligible nodes (auto-generated based on other parameters otherwise)
## NOTE: This will override the configuration based on values, please act carefully
## Use simple key-value YAML format, then it's transformed to properties format by the chart. e.g:
## process.roles: controller
## ... will be transformed to:
## process.roles=controller
##
config: {}
## @param controller.overrideConfiguration Kafka configuration override for Kafka controller-eligible nodes. Values defined here take precedence over the ones defined at `controller.config`
##
overrideConfiguration: {}
## @param controller.existingConfigmap Name of an existing ConfigMap with the Kafka configuration for Kafka controller-eligible nodes
##
existingConfigmap: ""
## @param controller.secretConfig Additional configuration to be appended at the end of the generated Kafka configuration for Kafka controller-eligible nodes (store in a secret)
##
secretConfig: ""
## @param controller.existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka configuration for Kafka controller-eligible nodes
## The key for the configuration should be: server-secret.properties
## NOTE: This will override secretConfig value
##
existingSecretConfig: ""
## @param controller.heapOpts Kafka Java Heap size for controller-eligible nodes
## @param controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
# resourcesPreset: "small"
## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## @param controller.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
## @param controller.podManagementPolicy StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
## @param controller.enableServiceLinks Whether information about services should be injected into pod's environment variables
## The environment variables injected by service links are not used, but can lead to slow Kafka boot times or slow running of the scripts when there are many services in the current namespace.
## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`.
##
enableServiceLinks: true
## @param controller.schedulerName Name of the k8s scheduler (other than default)
## @param controller.autoscaling.vpa.annotations Annotations for VPA resource
##
annotations: {}
## @param controller.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
##
controlledResources: []
## @param controller.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod
## cpu: 200m
## memory: 100Mi
##
maxAllowed: {}
## @param controller.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod
## cpu: 200m
## memory: 100Mi
##
minAllowed: {}
updatePolicy:
  ## @param controller.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy. Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod
  ## Possible values are "Off", "Initial", "Recreate", and "Auto".
  ##
  updateMode: Auto
hpa:
  ## @param controller.autoscaling.hpa.enabled Enable HPA for Kafka Controller
  ##
  enabled: false
  ## @param controller.autoscaling.hpa.annotations Annotations for HPA resource
  ##
  annotations: {}
  ## @param controller.autoscaling.hpa.minReplicas Minimum number of Kafka Controller replicas
  ##
  minReplicas: ""
  ## @param controller.autoscaling.hpa.maxReplicas Maximum number of Kafka Controller replicas
  ##
  maxReplicas: ""
## @param controller.autoscaling.hpa.targetCPU Target CPU utilization percentage
## @param controller.persistence.size PVC Storage Request for Kafka data volume
##
size: 15Gi
## @param controller.persistence.annotations Annotations for the PVC
##
annotations: {}
## @param controller.persistence.labels Labels for the PVC
##
labels: {}
## @param controller.persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it
## selector:
##   matchLabels:
##     app: my-app
##
selector: {}
## @param controller.persistence.mountPath Mount path of the Kafka data volume
##
mountPath: /bitnami/kafka
## Log Persistence parameters
##
logPersistence:
  ## @param controller.logPersistence.enabled Enable Kafka logs persistence using PVC
  ##
  enabled: false
  ## @param controller.logPersistence.existingClaim A manually managed Persistent Volume and Claim
  ## If defined, PVC must be created manually before volume will be bound
  ## The value is evaluated as a template
  ##
  existingClaim: ""
  ## @param controller.logPersistence.storageClass PVC Storage Class for Kafka logs volume
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
  ## @param controller.logPersistence.size PVC Storage Request for Kafka logs volume
  ##
  size: 8Gi
  ## @param controller.logPersistence.annotations Annotations for the PVC
  ##
  annotations: {}
  ## @param controller.logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it
  ## selector:
  ##   matchLabels:
  ##     app: my-app
  ##
  selector: {}
  ## @param controller.logPersistence.mountPath Mount path of the Kafka logs volume
  ##
  mountPath: /opt/bitnami/kafka/logs
## @section Broker-only statefulset parameters
##
broker:
## @param broker.replicaCount Number of Kafka broker-only nodes
##
replicaCount: 0
## @param broker.minId Minimal node.id values for broker-only nodes. Do not change after first initialization.
## Broker-only nodes increment their ID starting at this minimal value.
## We recommend setting this value high enough, as IDs under this value will be used by controller-eligible nodes
##
minId: 100
## @param broker.config Specify content for Kafka configuration for Kafka broker-only nodes (auto-generated based on other parameters otherwise)
## NOTE: This will override the configuration based on values, please act carefully
## Use simple key-value YAML format, then it's transformed to properties format by the chart. e.g:
## process.roles: broker
## ... will be transformed to:
## process.roles=broker
##
config: {}
## @param broker.overrideConfiguration Kafka configuration override for Kafka broker-only nodes. Values defined here take precedence over the ones defined at `broker.config`
##
overrideConfiguration: {}
## @param broker.existingConfigmap Name of an existing ConfigMap with the Kafka configuration for Kafka broker-only nodes
##
existingConfigmap: ""
## @param broker.secretConfig Additional configuration to be appended at the end of the generated Kafka configuration for Kafka broker-only nodes (store in a secret)
##
secretConfig: ""
## @param broker.existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka configuration for Kafka broker-only nodes
## The key for the configuration should be: server-secret.properties
## NOTE: This will override secretConfig value
##
existingSecretConfig: ""
## @param broker.heapOpts Kafka Java Heap size for broker-only nodes
## @param broker.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "small"
## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## @param broker.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
## @param broker.podManagementPolicy StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
## @param broker.enableServiceLinks Whether information about services should be injected into pod's environment variables
## The environment variables injected by service links are not used, but can lead to slow Kafka boot times or slow running of the scripts when there are many services in the current namespace.
## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`.
##
enableServiceLinks: true
## @param broker.schedulerName Name of the k8s scheduler (other than default)
## @param broker.autoscaling.vpa.annotations Annotations for VPA resource
##
annotations: {}
## @param broker.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
##
controlledResources: []
## @param broker.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod
## cpu: 200m
## memory: 100Mi
##
maxAllowed: {}
## @param broker.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod
## cpu: 200m
## memory: 100Mi
##
minAllowed: {}
updatePolicy:
  ## @param broker.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy. Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod
  ## Possible values are "Off", "Initial", "Recreate", and "Auto".
  ##
  updateMode: Auto
hpa:
  ## @param broker.autoscaling.hpa.enabled Enable HPA for Kafka Broker
  ##
  enabled: false
  ## @param broker.autoscaling.hpa.annotations Annotations for HPA resource
  ##
  annotations: {}
  ## @param broker.autoscaling.hpa.minReplicas Minimum number of Kafka Broker replicas
  ##
  minReplicas: ""
  ## @param broker.autoscaling.hpa.maxReplicas Maximum number of Kafka Broker replicas
  ##
  maxReplicas: ""
## @param broker.autoscaling.hpa.targetCPU Target CPU utilization percentage
## @param broker.persistence.annotations Annotations for the PVC
##
annotations: {}
## @param broker.persistence.labels Labels for the PVC
##
labels: {}
## @param broker.persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it
## selector:
##   matchLabels:
##     app: my-app
##
selector: {}
## @param broker.persistence.mountPath Mount path of the Kafka data volume
##
mountPath: /bitnami/kafka
## Log Persistence parameters
##
logPersistence:
  ## @param broker.logPersistence.enabled Enable Kafka logs persistence using PVC
  ##
  enabled: false
  ## @param broker.logPersistence.existingClaim A manually managed Persistent Volume and Claim
  ## If defined, PVC must be created manually before volume will be bound
  ## The value is evaluated as a template
  ##
  existingClaim: ""
  ## @param broker.logPersistence.storageClass PVC Storage Class for Kafka logs volume
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
  ## @param broker.logPersistence.size PVC Storage Request for Kafka logs volume
  ##
  size: 8Gi
  ## @param broker.logPersistence.annotations Annotations for the PVC
  ##
  annotations: {}
  ## @param broker.logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it
  ## selector:
  ##   matchLabels:
  ##     app: my-app
  ##
  selector: {}
  ## @param broker.logPersistence.mountPath Mount path of the Kafka logs volume
  ##
  mountPath: /opt/bitnami/kafka/logs
## @section Traffic Exposure parameters
##
## Service parameters
##
service:
## @param service.type Kubernetes Service type
##
type: ClusterIP
## @param service.ports.client Kafka svc port for client connections
## @param service.ports.controller Kafka svc port for controller connections
## @param service.ports.interbroker Kafka svc port for inter-broker connections
## @param service.ports.external Kafka svc port for external connections
##
ports:
  client: 9092
  controller: 9093
  interbroker: 9094
  external: 9095
## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value)
##
extraPorts: []
## @param service.nodePorts.client Node port for the Kafka client connections
## @param service.nodePorts.external Node port for the Kafka external connections
## NOTE: choose port between <30000-32767>
##
nodePorts:
  client: ""
  external: ""
## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
## @param externalAccess.controller.forceExpose If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes
##
forceExpose: false
## Parameters to configure K8s service(s) used to externally access Kafka brokers
## Note: A new service per broker will be created
##
service:
## @param externalAccess.controller.service.type Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP
##
type: LoadBalancer
## @param externalAccess.controller.service.ports.external Kafka port used for external access when service type is LoadBalancer
##
ports:
  external: 9094
## @param externalAccess.controller.service.loadBalancerClass Kubernetes Service Load Balancer class for external access when service type is LoadBalancer
## @param externalAccess.controller.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount
## e.g:
## loadBalancerIPs:
## - X.X.X.X
## - Y.Y.Y.Y
##
loadBalancerIPs: []
## @param externalAccess.controller.service.loadBalancerNames Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount
## e.g:
## loadBalancerNames:
## - broker1.external.example.com
## - broker2.external.example.com
##
loadBalancerNames: []
## @param externalAccess.controller.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount
## @param externalAccess.controller.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount
## e.g:
## nodePorts:
## - 30001
## - 30002
##
nodePorts: []
## @param externalAccess.controller.service.externalIPs Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount
## e.g:
## externalIPs:
## - X.X.X.X
## - Y.Y.Y.Y
##
externalIPs: []
## @param externalAccess.controller.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort
##
useHostIPs: false
## @param externalAccess.controller.service.usePodIPs Use pod IPs (MY_POD_IP) to configure the Kafka external listener
##
usePodIPs: false
## @param externalAccess.controller.service.domain Domain or external IP used to configure Kafka external listener when service type is NodePort or ClusterIP
## NodePort: If not specified, the container will try to get the kubernetes node external IP
## ClusterIP: Must be specified, ingress IP or domain where tcp for external ports is configured
##
domain: ""
## @param externalAccess.controller.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready
## @param externalAccess.broker.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount
## e.g:
## loadBalancerIPs:
## - X.X.X.X
## - Y.Y.Y.Y
##
loadBalancerIPs: []
## @param externalAccess.broker.service.loadBalancerNames Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount
## e.g:
## loadBalancerNames:
## - broker1.external.example.com
## - broker2.external.example.com
##
loadBalancerNames: []
## @param externalAccess.broker.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount
## @param externalAccess.broker.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount
## e.g:
## nodePorts:
## - 30001
## - 30002
##
nodePorts: []
## @param externalAccess.broker.service.externalIPs Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount
## e.g:
## externalIPs:
## - X.X.X.X
## - Y.Y.Y.Y
##
externalIPs: []
## @param externalAccess.broker.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort
##
useHostIPs: false
## @param externalAccess.broker.service.usePodIPs Use pod IPs (MY_POD_IP) to configure the Kafka external listener
##
usePodIPs: false
## @param externalAccess.broker.service.domain Domain or external IP used to configure Kafka external listener when service type is NodePort or ClusterIP
## NodePort: If not specified, the container will try to get the kubernetes node external IP
## ClusterIP: Must be specified, ingress IP or domain where tcp for external ports is configured
##
domain: ""
## @param externalAccess.broker.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready
## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param networkPolicy.allowExternal Don't require client label for connections
## When set to false, only pods with the correct client label will have network access to the port Kafka is
## listening on. When true, Kafka accepts connections from any source (with the correct destination port).
##
allowExternal: true
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
##
allowExternalEgress: true
## @param networkPolicy.addExternalClientAccess Allow access from pods with client label set to "true". Ignored if `networkPolicy.allowExternal` is true.
##
addExternalClientAccess: true
## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
##   - ports:
##       - port: 1234
##     from:
##       - podSelector:
##           matchLabels:
##             role: frontend
##       - podSelector:
##           matchExpressions:
##             - key: role
##               operator: In
##               values:
##                 - frontend
##
extraIngress: []
## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
##   - ports:
##       - port: 1234
##     to:
##       - podSelector:
##           matchLabels:
##             role: frontend
##       - podSelector:
##           matchExpressions:
##             - key: role
##               operator: In
##               values:
##                 - frontend
##
extraEgress: []
## @param networkPolicy.ingressPodMatchLabels [object] Labels to match to allow traffic from other pods. Ignored if `networkPolicy.allowExternal` is true.
## e.g:
## ingressPodMatchLabels:
## my-client: "true"
##
ingressPodMatchLabels: {}
## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true.
## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true.
## @param metrics.jmx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "micro"
## @param metrics.jmx.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## @param provisioning.extraProvisioningCommands Extra commands to run to provision cluster resources
## - echo "Allow user to consume from any topic"
## - >-
## /opt/bitnami/kafka/bin/kafka-acls.sh
## --bootstrap-server $KAFKA_SERVICE
## --command-config $CLIENT_CONF
## --add
## --allow-principal User:user
## --consumer --topic *
## - "/opt/bitnami/kafka/bin/kafka-acls.sh
## --bootstrap-server $KAFKA_SERVICE
## --command-config $CLIENT_CONF
## --list"
##
extraProvisioningCommands: []
## @param provisioning.parallel Number of provisioning commands to run at the same time
##
parallel: 1
## @param provisioning.preScript Extra bash script to run before topic provisioning. $CLIENT_CONF is the path to a properties file with the most needed configurations
##
preScript: ""
## @param provisioning.postScript Extra bash script to run after topic provisioning. $CLIENT_CONF is the path to a properties file with the most needed configurations
##
postScript: ""
## Auth Configuration for kafka provisioning Job
##
auth:
## TLS configuration for kafka provisioning Job
##
tls:
## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `JKS` and `PEM`.
## Note: ignored if auth.tls.client.protocol is different from one of these values: "SSL" "SASL_SSL"
##
type: jks
## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job.
## When using 'jks' format for certificates, the secret should contain a truststore and a keystore.
## When using 'pem' format for certificates, the secret should contain one of the following:
## 1. A public CA certificate, a public certificate and one private key.
## 2. A truststore and a keystore in PEM format
## If caCert is set, option 1 will be taken, otherwise option 2.
##
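## e.g. for the default 'jks' format (the secret name is illustrative):
##   kubectl create secret generic kafka-provisioning-certs \
##     --from-file=keystore.jks=./keystore.jks --from-file=truststore.jks=./truststore.jks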
certificatesSecret: ""
## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt)
##
cert: tls.crt
## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key)
##
key: tls.key
## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt)
##
caCert: ca.crt
## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks)
##
keystore: keystore.jks
## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks)
##
truststore: truststore.jks
## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected.
## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key.
##
passwordsSecret: ""
## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password)
## Note: must not be used if `passwordsSecret` is not defined.
##
keyPasswordSecretKey: key-password
## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password)
## Note: must not be used if `passwordsSecret` is not defined.
##
keystorePasswordSecretKey: keystore-password
## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password)
## Note: must not be used if `passwordsSecret` is not defined.
##
truststorePasswordSecretKey: truststore-password
## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided.
##
keyPassword: ""
## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided.
##
keystorePassword: ""
## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided.
## @param provisioning.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "micro"
## @param provisioning.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## @param provisioning.enableServiceLinks Whether information about services should be injected into pod's environment variables
## The environment variables injected by service links are not used, but can lead to slow Kafka boot times or slow running of the scripts when there are many services in the current namespace.
## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`.
##
enableServiceLinks: true
## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s)
## e.g:
## extraVolumes:
## - name: kafka-jaas
## secret:
## secretName: kafka-jaas
##
extraVolumes: []
## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s)