Compare commits

..

No commits in common. 'service-demo01' and 'master' have entirely different histories.

  1. 0
      demo01-flow-test1/values-quota.yaml
  2. 36
      demo01-flow-test1/values.yaml
  3. 36
      demo01-kaf-conn-sink2/values.yaml
  4. 41
      demo01-kaf-conn-src2/values.yaml
  5. 40
      demo01-kaf-kong-sink4/values.yaml
  6. 79
      demo01-kafka-connect2/values.yaml
  7. 50
      demo01-kafka-ui2/values.yaml
  8. 63
      demo01-kafka/values.yaml
  9. 95
      demo01-kafka2/values.yaml
  10. 14
      demo01-kubeflow/values.yaml
  11. 48
      demo01-pg1/values.yaml
  12. 48
      demo01-postgresql2/values.yaml
  13. 8
      demo01-qdrant/values-quota.yaml
  14. 4
      demo01-qdrant/values-volume-quota.yaml
  15. 48
      demo01-qdrant/values.yaml
  16. 75
      demo01-star3/values.yaml
  17. 185
      demo01-super9/values.yaml
  18. 14
      demo01-test/values.yaml
  19. 8
      demo01-test01/values-quota.yaml
  20. 4
      demo01-test01/values-volume-quota.yaml
  21. 48
      demo01-test01/values.yaml

@ -1,36 +0,0 @@
#version: flowise, 6.0.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# verify against the flowise chart's values schema before applying.
image:
  registry: docker.io
persistence:
  enabled: true
  size: 1Gi
  storageClass: ""
resources: {}
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: "root-ca-issuer"
    cert-manager.io/duration: 8760h
    cert-manager.io/renew-before: 720h
    konghq.com/plugins: oidc-plugin, keycloak-authz-plugin
  hosts:
    - host: "demo01-flow-test1.gke.paasup.io"
      paths:
        - /
  tls:
    - hosts:
        - "demo01-flow-test1.gke.paasup.io"
      secretName: "demo01-flow-test1-tls-secret"
postgresql:
  enabled: true
  auth:
    existingSecret: "demo01-flow-test1-postgresql"
  primary:
    persistence:
      enabled: true
      size: 8Gi
      storageClass: ""

@ -1,36 +0,0 @@
#version: kafka-connector, 1.0.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting.
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnector
metadata:
  name: "demo01-kaf-conn-sink2-kafka-starrocks"
  namespace: kafka-cluster
  labels:
    strimzi.io/cluster: kafka-cluster
spec:
  class: com.starrocks.connector.kafka.StarRocksSinkConnector
  tasksMax: 6
  config:
    topics: "demo01.public.subway_info_copy, demo01.public.subway_passengers_copy, demo01.public.subway_passengers_time_copy"
    starrocks.http.url: kube-starrocks-fe-service.demo01-star3.svc.cluster.local:8030
    starrocks.database.name: "quickstart"
    starrocks.username: "root"
    # NOTE(review): plaintext credential committed to VCS — move to a Secret reference.
    starrocks.password: "sdHwyKH8p6Xw"
    sink.properties.strip_outer_array: true
    connect.timeoutms: "30000"
    starrocks.topic2table.map: "demo01.public.subway_info_copy:subway_info_copy, demo01.public.subway_passengers_copy:subway_passengers_copy, demo01.public.subway_passengers_time_copy:subway_passengers_time_copy"
    transforms: addfield,unwrap
    transforms.addfield.type: com.starrocks.connector.kafka.transforms.AddOpFieldForDebeziumRecord
    transforms.unwrap.type: io.debezium.transforms.ExtractNewRecordState
    # fixed: the two keys below were spelled "transforms.unwarp.*" (typo) and so
    # never matched the "unwrap" transform alias — tombstone/delete handling was
    # silently ignored by Kafka Connect.
    transforms.unwrap.drop.tombstones: true
    transforms.unwrap.delete.handling.mode: rewrite

@ -1,41 +0,0 @@
#version: kafka-connector, 1.0.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting.
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnector
metadata:
  name: "demo01-kaf-conn-src2-kafka-postgresql"
  namespace: kafka-cluster
  labels:
    strimzi.io/cluster: kafka-cluster
spec:
  class: io.debezium.connector.postgresql.PostgresConnector
  tasksMax: 1
  config:
    database.hostname: "demo01-postgresql2-postgresql-ha-postgresql.demo01-postgresql2.svc.cluster.local"
    database.port: "5432"
    database.user: "postgres"
    # NOTE(review): plaintext credential committed to VCS — move to a Secret reference.
    database.password: "Gb58gQx8Nhw8"
    database.dbname: "postgres"
    table.include.list: "public.subway_info_copy, public.subway_passengers_copy, public.subway_passengers_time_copy"
    snapshot.mode: "initial"
    plugin.name: pgoutput
    slot.name: "debezium_slot"
    publication.autocreate.mode: filtered
    topic.prefix: "demo01"
    producer.override.security.protocol: "SASL_PLAINTEXT"
    producer.override.sasl.mechanism: "OAUTHBEARER"
    producer.override.sasl.jaas.config: |
      org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required
      oauth.token.endpoint.uri="https://keycloak.gke.paasup.io/realms/paasup/protocol/openid-connect/token"
      oauth.client.id="service-demo01-kafka-common"
      oauth.client.secret="6c9ddc54-5cf6-4b63-aff8-7e0d92b35fbe";

@ -1,40 +0,0 @@
#version: kafka-connector, 1.0.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting.
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnector
metadata:
  name: "demo01-kaf-kong-sink4-kafka-starrocks"
  namespace: kafka-cluster
  labels:
    strimzi.io/cluster: kafka-cluster
spec:
  class: com.starrocks.connector.kafka.StarRocksSinkConnector
  tasksMax: 6
  config:
    topics: "demo01.logging.kong"
    starrocks.http.url: kube-starrocks-fe-service.demo01-star3.svc.cluster.local:8030
    starrocks.database.name: "quickstart"
    starrocks.username: "root"
    # NOTE(review): plaintext credential committed to VCS — move to a Secret reference.
    starrocks.password: "sdHwyKH8p6Xw"
    sink.properties.strip_outer_array: true
    connect.timeoutms: "30000"
    starrocks.topic2table.map: "demo01.logging.kong:kong_log_events"
    key.converter: "org.apache.kafka.connect.json.JsonConverter"
    value.converter: "org.apache.kafka.connect.json.JsonConverter"
    key.converter.schemas.enable: "true"
    value.converter.schemas.enable: "false"
    errors.tolerance: "all"
    errors.deadletterqueue.topic.name: "demo01.logging.dlq"
    errors.deadletterqueue.topic.replication.factor: "1"
    errors.log.enable: "true"

@ -1,79 +0,0 @@
#version: kafka-connect, 1.0.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# verify field placement against the Strimzi KafkaConnect/KafkaConnector CRDs.
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnect
metadata:
  name: "demo01-kafka-connect2"
  namespace: demo01-kafka
  labels:
    strimzi.io/cluster: kafka
  annotations:
    strimzi.io/use-connector-resources: 'true'
spec:
  image: paasup/kafka-connect:0.1
  replicas: 1
  bootstrapServers: "demo01-kafka.demo01-kafka.svc.cluster.local:9092"
  config:
    group.id: connect-cluster
    offset.storage.topic: connect-offsets
    config.storage.topic: connect-configs
    status.storage.topic: connect-status
    key.converter: org.apache.kafka.connect.json.JsonConverter
    value.converter: org.apache.kafka.connect.json.JsonConverter
    plugin.path: /opt/kafka/plugins
  authentication:
    type: plain
    username: admin
    passwordSecret:
      secretName: "demo01-kafka-connect2-infisicalsecret"
      password: password
  logging:
    type: inline
    loggers:
      rootLogger.level: INFO
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnector
metadata:
  name: debezium-postgres-source
  namespace: demo01-kafka
  labels:
    strimzi.io/cluster: "demo01-kafka-connect2"
spec:
  class: io.debezium.connector.postgresql.PostgresConnector
  tasksMax: 1
  config:
    database.hostname: "demo01-postgresql2-postgresql-ha-postgresql.demo01-postgresql2.svc.cluster.local"
    database.port: "5432"
    database.user: "postgres"
    # NOTE(review): plaintext credential committed to VCS — move to a Secret reference.
    database.password: "Gb58gQx8Nhw8"
    database.dbname: "postgres"
    table.include.list: "public.subway_info"
    plugin.name: pgoutput
    # NOTE(review): slot "debezium_slot" is also claimed by the demo01-kaf-conn-src2
    # connector against the same database — two connectors cannot share one
    # replication slot; confirm only one is active.
    slot.name: debezium_slot
    publication.autocreate.mode: filtered
    topic.prefix: pg
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnector
metadata:
  name: s3-sink
  namespace: demo01-kafka
  labels:
    strimzi.io/cluster: "demo01-kafka-connect2"
spec:
  class: io.confluent.connect.s3.S3SinkConnector
  tasksMax: 1
  config:
    topics: "pg.public.subway_info"
    store.url: "http://minio.minio.svc.cluster.local:9000"
    s3.region: us-east-1
    # NOTE(review): plaintext object-store credentials committed to VCS.
    aws.access.key.id: "adminuser"
    aws.secret.access.key: "Paasupgke1234!"
    s3.bucket.name: "sink"
    s3.part.size: 5242880
    flush.size: 3
    format.class: io.confluent.connect.s3.format.json.JsonFormat
    storage.class: io.confluent.connect.s3.storage.S3Storage
    schema.compatibility: NONE

@ -1,50 +0,0 @@
#version: kafka-ui, 1.5.1
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# verify against the kafka-ui chart (yamlApplicationConfig holds the app config).
image:
  registry: docker.io
  repository: wbsong111/kafka-ui
  tag: "v1.3.0"
  pullPolicy: IfNotPresent
yamlApplicationConfig:
  kafka:
    clusters:
      - name: kafka-cluster
        bootstrapServers: SASL_PLAINTEXT://kafka-cluster-kafka-tls-bootstrap.kafka-cluster.svc.cluster.local:9093
        properties:
          security.protocol: SASL_PLAINTEXT
          sasl.mechanism: OAUTHBEARER
          # NOTE(review): plaintext OAuth client secret committed to VCS.
          sasl.jaas.config: |
            org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required
            oauth.token.endpoint.uri="https://keycloak.gke.paasup.io/realms/paasup/protocol/openid-connect/token"
            oauth.client.id="service-demo01-kafka-common"
            oauth.client.secret="6c9ddc54-5cf6-4b63-aff8-7e0d92b35fbe";
          sasl.login.callback.handler.class: "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler"
  auth:
    type: disabled
  management:
    health:
      ldap:
        enabled: false
volumes:
  - name: truststore
    secret:
      secretName: truststore
volumeMounts:
  - name: truststore
    mountPath: /etc/kafka/secrets
    readOnly: true
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: "root-ca-issuer"
    cert-manager.io/duration: 8760h
    cert-manager.io/renew-before: 720h
    kubernetes.io/ingress.class: kong
    konghq.com/protocols: https
  host: "demo01-kafka-ui2.gke.paasup.io"
  tls:
    enabled: true
    secretName: "demo01-kafka-ui2-tls-secret"

@ -1,63 +0,0 @@
#version: kafka, 32.4.3
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# verify against the Bitnami kafka 32.x values schema.
global:
  imageRegistry: ""
  imagePullSecrets: []
  defaultStorageClass: ""
image:
  registry: docker.io
  repository: bitnami/kafka
  tag: 4.0.0-debian-12-r10
controller:
  replicaCount: 3
  controllerOnly: false
  persistence:
    enabled: true
    size: 8Gi
    storageClass: ""
  logPersistence:
    enabled: false
    size: 8Gi
    storageClass: ""
  resources: {}
  resourcesPreset: "small"
broker:
  replicaCount: 0
  persistence:
    enabled: true
    size: 8Gi
    storageClass: ""
  logPersistence:
    enabled: true
    size: 8Gi
    storageClass: ""
  resources: {}
  resourcesPreset: "small"
listeners:
  client:
    containerPort: 9092
    protocol: SASL_PLAINTEXT
  controller:
    containerPort: 9093
    protocol: SASL_PLAINTEXT
  interbroker:
    containerPort: 9094
    protocol: SASL_PLAINTEXT
sasl:
  enabledMechanisms: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
  client:
    users: ["admin"]
    # NOTE(review): plaintext SASL password committed to VCS — prefer existingSecret.
    passwords: "password1234!"
service:
  type: ClusterIP
  ports:
    client: 9092
metrics:
  jmx:
    enabled: false

@ -1,95 +0,0 @@
#version: kafka, 1.0.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting.
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
  # NOTE(review): no namespace set on this KafkaUser — it relies on the applier's
  # default namespace; the topics below pin kafka-cluster explicitly. Confirm.
  name: "service-account-service-demo01-kafka-common"
  labels:
    strimzi.io/cluster: kafka-cluster
spec:
  authorization:
    type: simple
    acls:
      - resource:
          type: topic
          name: "demo01."
          patternType: prefix
        operations:
          - Read
          - Describe
          - DescribeConfigs
          - Write
      - resource:
          type: group
          name: "demo01-"
          patternType: prefix
        operations:
          - Read
          - Write
          - Describe
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  labels:
    strimzi.io/cluster: kafka-cluster
  name: "public-subway-info-copy"
  namespace: kafka-cluster
spec:
  partitions: 6
  replicas: 3
  topicName: "demo01.public.subway_info_copy"
  config:
    cleanup.policy: compact
    retention.bytes: "1073741824"
    segment.bytes: "268435456"
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  labels:
    strimzi.io/cluster: kafka-cluster
  name: "public-subway-passengers-copy"
  namespace: kafka-cluster
spec:
  partitions: 6
  replicas: 3
  topicName: "demo01.public.subway_passengers_copy"
  config:
    cleanup.policy: compact
    retention.bytes: "1073741824"
    segment.bytes: "268435456"
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  labels:
    strimzi.io/cluster: kafka-cluster
  name: "public-subway-passengers-time-copy"
  namespace: kafka-cluster
spec:
  partitions: 6
  replicas: 3
  topicName: "demo01.public.subway_passengers_time_copy"
  config:
    cleanup.policy: compact
    retention.bytes: "1073741824"
    segment.bytes: "268435456"
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  labels:
    strimzi.io/cluster: kafka-cluster
  name: "logging-dlq"
  namespace: kafka-cluster
spec:
  partitions: 6
  replicas: 3
  topicName: "demo01.logging.dlq"
  config:
    # NOTE(review): compaction on a dead-letter-queue topic is unusual — compacted
    # topics keep only the latest record per key; confirm "delete" wasn't intended.
    cleanup.policy: compact
    retention.bytes: "1073741824"
    segment.bytes: "268435456"

@ -1,14 +0,0 @@
#version: kubeflow-profile, 1.10.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting.
apiVersion: kubeflow.org/v1
kind: Profile
metadata:
  name: "demo01-kubeflow"
spec:
  owner:
    kind: User
    name: "adminuser@paasup.io"
  resourceQuotaSpec:
    hard:
      cpu: "16"
      memory: 32Gi
      persistentvolumeclaims: "12"

@ -1,48 +0,0 @@
#version: postgresql-ha, 11.9.4-1
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# verify placement of persistence/volumePermissions/podSecurityContext against
# the postgresql-ha chart's values schema.
global:
  imageRegistry: ""
postgresql:
  username: postgres
  existingSecret: "demo01-pg1-infisicalsecret"
  maxConnections: "100"
  sharedPreloadLibraries: "repmgr, pgaudit, pg_stat_statements, pgoutput"
  extendedConf: |-
    wal_level = logical
    max_replication_slots = 4
    max_wal_senders = 4
  replicaCount: 1
  extraEnvVars:
    - name: TZ
      value: Asia/Seoul
  resources:
    requests:
      cpu: 100m
      memory: 512Mi
    limits:
      cpu: 500m
      memory: 1024Mi
  tolerations: []
  nodeSelector: {}
pgpool:
  existingSecret: "demo01-pg1-infisicalsecret"
  replicaCount: 0
persistence:
  enabled: true
  storageClass: ""
  size: 1Gi
volumePermissions:
  enabled: true
podSecurityContext:
  runAsUser: 0

@ -1,48 +0,0 @@
#version: postgresql-ha, 11.9.4-1
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# verify placement of persistence/volumePermissions/podSecurityContext against
# the postgresql-ha chart's values schema.
global:
  imageRegistry: ""
postgresql:
  username: postgres
  existingSecret: "demo01-postgresql2-infisicalsecret"
  maxConnections: "100"
  sharedPreloadLibraries: "repmgr, pgaudit, pg_stat_statements, pgoutput"
  extendedConf: |-
    wal_level = logical
    max_replication_slots = 4
    max_wal_senders = 4
  replicaCount: 1
  extraEnvVars:
    - name: TZ
      value: Asia/Seoul
  resources:
    requests:
      cpu: 100m
      memory: 512Mi
    limits:
      cpu: 500m
      memory: 1024Mi
  tolerations: []
  nodeSelector: {}
pgpool:
  existingSecret: "demo01-postgresql2-infisicalsecret"
  replicaCount: 0
persistence:
  enabled: true
  storageClass: ""
  size: 10Gi
volumePermissions:
  enabled: true
podSecurityContext:
  runAsUser: 0

@ -1,8 +0,0 @@
# Resource quota overrides for the qdrant release.
# NOTE(review): indentation reconstructed — the diff viewer stripped nesting.
resources:
  requests:
    cpu: 1000m
    memory: 2Gi
  limits:
    cpu: 2000m
    memory: 4Gi

@ -1,4 +0,0 @@
# Volume quota overrides for the qdrant release.
# NOTE(review): indentation reconstructed — the diff viewer stripped nesting.
persistence:
  accessModes: ["ReadWriteOnce"]
  size: 10Gi
  storageClassName: ""

@ -1,48 +0,0 @@
#version: qdrant, 1.12.4
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# the "images:" key (plural) is kept as-is — confirm it matches this chart.
images:
  repository: docker.io/qdrant/qdrant
  tag: v1.12.4
replicaCount: 1
nodeSelector: {}
tolerations: []
resources: {}
persistence:
  accessModes: ["ReadWriteOnce"]
  size: 10Gi
  storageClassName: ""
config:
  log_level: INFO
  cluster:
    enabled: true
    p2p:
      port: 6335
    consensus:
      tick_period_ms: 100
apiKey: false
readOnlyApiKey: false
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: "root-ca-issuer"
    cert-manager.io/duration: 8760h
    cert-manager.io/renew-before: 720h
    konghq.com/plugins: oidc-plugin, keycloak-authz-plugin
  hosts:
    - host: "demo01-qdrant.gke.paasup.io"
      paths:
        - path: /
          pathType: Prefix
          servicePort: 6333
  tls:
    - hosts:
        - "demo01-qdrant.gke.paasup.io"
      secretName: "demo01-qdrant-tls-secret"
dip:
  mainPath: dashboard

@ -1,75 +0,0 @@
#version: starrocks, 1.11.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# verify against the StarRocks operator chart values schema.
initPassword:
  enabled: true
  password: ""
  passwordSecret: "demo01-star3-infisicalsecret"
timeZone: Asia/Seoul
starrocksCluster:
  enabledBe: false
  enabledCn: true
starrocksFESpec:
  replicas: 3
  runAsNonRoot: "false"
  service:
    type: ClusterIP
  resources:
    requests:
      cpu: 300m
      memory: 1Gi
    limits:
      cpu: 2
      memory: 4Gi
  storageSpec:
    name: fe
    storageClassName: "standard-rwo"
    storageSize: 10Gi
    logStorageSize: 5Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  # NOTE(review): plaintext S3 credentials embedded in FE config — externalize.
  config: |
    run_mode = shared_data
    cloud_native_storage_type = S3
    aws_s3_endpoint = minio.minio.svc.cluster.local:9000
    aws_s3_path = starrocks2
    aws_s3_access_key = IyCGYXy3oh1h1rpm6XS1
    aws_s3_secret_key = hyd2yPyyHTQZByzzixt99pY0AjTyRsWiOTCxBacp
    aws_s3_use_instance_profile = false
    aws_s3_use_aws_sdk_default_behavior = false
    enable_load_volume_from_conf = true
starrocksCnSpec:
  replicas: 3
  runAsNonRoot: "false"
  resources:
    requests:
      # fixed: was "300M" — in a Kubernetes quantity "M" means mega (300 million
      # cores), which can never be scheduled; "300m" (millicores) matches the FE spec.
      cpu: 300m
      memory: 2Gi
    limits:
      cpu: 2
      memory: 4Gi
  storageSpec:
    name: be
    storageClassName: "standard-rwo"
    storageSize: 15Gi
    logStorageSize: 10Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  autoScalingPolicy: {}
starrocksFeProxySpec:
  enabled: true
  resolver: "kube-dns.kube-system.svc.cluster.local"
  service:
    type: ClusterIP

@ -1,185 +0,0 @@
#version: superset, 0.13.5-1
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting,
# including the Python inside the block scalars below. The Python nesting was
# recovered from its own syntax (class/def/if structure); review before applying.
configOverrides:
  # NOTE(review): hard-coded SECRET_KEY and OAuth client_secret committed to VCS.
  secret: |
    SECRET_KEY = 'W5cHzo1QQitb'
  my_override: |
    FEATURE_FLAGS = {
        "ENABLE_TEMPLATE_REMOVE_FILTERS" : True,
        "ENABLE_TEMPLATE_PROCESSING": True,
        "DASHBOARD_NATIVE_FILTERS" : True,
        "DASHBOARD_NATIVE_FILTERS_SET": True
    }
  enable_oauth: |
    from flask_appbuilder.security.manager import (AUTH_DB, AUTH_OAUTH)
    from superset.security import SupersetSecurityManager
    from flask import request
    import requests
    import logging

    class CustomSsoSecurityManager(SupersetSecurityManager):
        def oauth_user_info(self, provider, response=None):
            me = self.appbuilder.sm.oauth_remotes[provider].get("openid-connect/userinfo")
            me.raise_for_status()
            data = me.json()
            logging.debug("User info from Keycloak: %s", data)
            role = []
            username = data.get("preferred_username", "")
            host = request.host
            dip_api_url = "http://dip-api.platform.svc.cluster.local:8087"
            url = f"{dip_api_url}/gwapi/v1/projectusers/{username}"
            request_data = {"url": f"https://{host}"}
            response = requests.post(url, json=request_data, headers={"Content-Type": "application/json"}, verify=False)
            if response.status_code == 200:
                logging.info(f"API 요청 성공: {response.status_code}, {response.text}")
                role.append(response.json().get("roleName",""))
            else:
                logging.info(f"API 요청 실패: {response.status_code}, {response.text}")
                role.append("")
            return {
                "username": data.get("preferred_username", ""),
                "first_name": data.get("given_name", ""),
                "last_name": data.get("family_name", ""),
                "email": data.get("email", ""),
                "role_keys": role,
            }

    AUTH_TYPE = AUTH_OAUTH
    AUTH_USER_REGISTRATION = True
    AUTH_USER_REGISTRATION_ROLE = "Public"
    AUTH_ROLES_SYNC_AT_LOGIN = True
    CUSTOM_SECURITY_MANAGER = CustomSsoSecurityManager
    OAUTH_PROVIDERS = [
        {
            "name": "keycloak",
            "icon": "fa-key",
            "token_key": "access_token",
            "remote_app": {
                "client_id": "service-demo01-super9",
                "client_secret": "ba5646c9-6be0-4355-af0f-66a6b3626530",
                "client_kwargs": {
                    "scope": "openid email profile",
                    'verify': False
                },
                'server_metadata_url': 'https://keycloak.gke.paasup.io/realms/paasup/.well-known/openid-configuration',
                'api_base_url': 'https://keycloak.gke.paasup.io/realms/paasup/protocol/'
            }
        }
    ]
    AUTH_ROLES_MAPPING = {
        'root': ['Admin'],
        'admin': ['Admin'],
        'manager': ['Admin'],
        'member': ['Alpha'],
    }
bootstrapScript: |
  #!/bin/bash
  apt update
  apt install -y pkg-config build-essential default-libmysqlclient-dev libpq-dev
  pip install sqlalchemy-drill psycopg2-binary Authlib
  pip install mysqlclient
image:
  repository: apachesuperset.docker.scarf.sh/apache/superset
  tag: ~
  pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
ingress:
  enabled: true
  annotations:
    # NOTE(review): uses "letsencrypt-prod" while every other release in this
    # changeset uses "root-ca-issuer" — confirm this difference is intentional.
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    cert-manager.io/duration: 8760h
    cert-manager.io/renew-before: 720h
  path: /
  pathType: ImplementationSpecific
  hosts:
    - "demo01-super9.gke.paasup.io"
  tls:
    - hosts:
        - "demo01-super9.gke.paasup.io"
      secretName: "demo01-super9-tls-secret"
supersetNode:
  replicas:
    enabled: true
    replicaCount: 1
  connections:
    redis_host: "demo01-super9-redis-headless"
    redis_port: "6379"
    redis_user: ""
    redis_cache_db: "1"
    redis_celery_db: "0"
    redis_ssl:
      enabled: false
      ssl_cert_reqs: CERT_NONE
    db_host: "demo01-super9-postgresql"
    db_port: "5432"
    db_user: superset
    # NOTE(review): plaintext DB password committed to VCS.
    db_pass: "Gb58gQx8Nhw8"
    db_name: superset
  resources: {}
supersetWorker:
  replicas:
    enabled: true
    replicaCount: 1
  resources: {}
supersetCeleryBeat:
  enabled: false
  resources: {}
supersetCeleryFlower:
  enabled: false
  replicaCount: 1
  resources: {}
postgresql:
  enabled: true
  auth:
    username: superset
    password: ""
    database: superset
    existingSecret: "demo01-super9-infisicalsecret"
  image:
    registry: docker.io
  primary:
    resources:
      limits: {}
      requests:
        memory: 256Mi
        cpu: 250m
    persistence:
      enabled: true
      storageClass: ""
      size: 8Gi
redis:
  enabled: true
  architecture: standalone
  auth:
    enabled: false
    existingSecret: ""
    existingSecretPasswordKey: ""
  image:
    registry: docker.io
  master:
    resources:
      limits: {}
      requests: {}
    persistence:
      enabled: true
      storageClass: ""
      size: 8Gi

@ -1,14 +0,0 @@
#version: kubeflow-profile, 1.10.0
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting.
apiVersion: kubeflow.org/v1
kind: Profile
metadata:
  name: "demo01-test"
spec:
  owner:
    kind: User
    name: "adminuser@paasup.io"
  resourceQuotaSpec:
    hard:
      cpu: "16"
      memory: 32Gi
      persistentvolumeclaims: "12"

@ -1,8 +0,0 @@
# Resource quota overrides for the test01 release.
# NOTE(review): indentation reconstructed — the diff viewer stripped nesting.
resources:
  requests:
    cpu: 1000m
    memory: 2Gi
  limits:
    cpu: 2000m
    memory: 4Gi

@ -1,4 +0,0 @@
# Volume quota overrides for the test01 release.
# NOTE(review): indentation reconstructed — the diff viewer stripped nesting.
persistence:
  accessModes: ["ReadWriteOnce"]
  size: 10Gi
  storageClassName: ""

@ -1,48 +0,0 @@
#version: qdrant, 1.12.4
# NOTE(review): indentation reconstructed — the diff viewer stripped all nesting;
# the "images:" key (plural) is kept as-is — confirm it matches this chart.
images:
  repository: docker.io/qdrant/qdrant
  tag: v1.12.4
replicaCount: 1
nodeSelector: {}
tolerations: []
resources: {}
persistence:
  accessModes: ["ReadWriteOnce"]
  size: 10Gi
  storageClassName: ""
config:
  log_level: INFO
  cluster:
    enabled: true
    p2p:
      port: 6335
    consensus:
      tick_period_ms: 100
apiKey: false
readOnlyApiKey: false
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: "root-ca-issuer"
    cert-manager.io/duration: 8760h
    cert-manager.io/renew-before: 720h
    konghq.com/plugins: oidc-plugin, keycloak-authz-plugin
  hosts:
    - host: "demo01-test01.gke.paasup.io"
      paths:
        - path: /
          pathType: Prefix
          servicePort: 6333
  tls:
    - hosts:
        - "demo01-test01.gke.paasup.io"
      secretName: "demo01-test01-tls-secret"
dip:
  mainPath: dashboard
Loading…
Cancel
Save