Identity not loading behind ALB

I’m running the Helm chart to set up Camunda on an EKS Cluster. I have the cluster set up and the pods start fine, but I’m not able to access any Web UI. When I browse to the defined host, it redirects to http://identity.mydomain.com/auth/realms/camunda-platform/protocol/openid-connect/auth?client_id=camunda-identity&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2Fauth%2Flogin-callback&response_type=code&scope=openid+email&state=http://identity.mydomain.com/auth/realms/camunda-platform/protocol/openid-connect/auth?client_id=camunda-identity&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2Fauth%2Flogin-callback&response_type=code&scope=openid+email&state=, but ultimately reports “ERR_TOO_MANY_REDIRECTS”

values.yaml

  annotations: {}
  labels:
    app: camunda-platform

  image:
    tag: 8.0.0
    pullPolicy: IfNotPresent
    pullSecrets: [ ]

  elasticsearch:
    disableExporter: false
    url:
    host: "elasticsearch-master"
    port: 9200
    clusterName: "elasticsearch"
    prefix: zeebe-record
  zeebeClusterName: "{{ .Release.Name }}-zeebe"
  zeebePort: 26500


  identity:
    auth:
      enabled: true

      publicIssuerUrl: "http://identity.mydomain.com/auth/realms/camunda-platform"

      operate:
        existingSecret: ""
        redirectUrl: "http://operate.mydomain.com"

      tasklist:
        existingSecret: ""
        redirectUrl: "http://tasklist.mydomain.com"

      optimize:
        existingSecret: ""
        redirectUrl: "http://optimize.mydomain.com"

zeebe:
  enabled: true

  image:
    repository: camunda/zeebe
    tag:
  clusterSize: "3"
  partitionCount: "3"
  replicationFactor: "3"
  env:
    - name: ZEEBE_BROKER_DATA_SNAPSHOTPERIOD
      value: "5m"
    - name: ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK
      value: "0.85"
    - name: ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK
      value: "0.87"
    - name: IDENTITY_AUTH_PROVIDER_ISSUER_URL
      value: "http://identity.mydomain.com/auth/realms/camunda-platform"
  configMap:
    defaultMode: 0744
  command: []

  logLevel: info
  log4j2: ''
  javaOpts: >-
    -XX:+HeapDumpOnOutOfMemoryError
    -XX:HeapDumpPath=/usr/local/zeebe/data
    -XX:ErrorFile=/usr/local/zeebe/data/zeebe_error%p.log
    -XX:+ExitOnOutOfMemoryError
  service:
    type: ClusterIP
    httpPort: 9600
    httpName: "http"
    commandPort: 26501
    commandName: "command"
    internalPort: 26502
    internalName: "internal"
    extraPorts: []

  serviceAccount:
    enabled: true
    name: ""
    annotations: { }

  cpuThreadCount: "3"
  ioThreadCount: "3"
  resources:
    requests:
      cpu: 800m
      memory: 1200Mi
    limits:
      cpu: 960m
      memory: 1920Mi

  persistenceType: disk
  pvcSize: "32Gi"
  pvcAccessModes: [ "ReadWriteOnce" ]
  pvcStorageClassName: ''

  extraVolumes: [ ]
  extraVolumeMounts: [ ]
  extraInitContainers: [ ]

  podAnnotations: { }
  podLabels: { }
  podDisruptionBudget:
    enabled: false
    minAvailable:
    maxUnavailable: 1

  containerSecurityContext: { }
  nodeSelector: { }
  tolerations: [ ]
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: "app.kubernetes.io/component"
                operator: In
                values:
                  - zeebe-broker
          topologyKey: "kubernetes.io/hostname"

  priorityClassName: ""

  readinessProbe:
    probePath: /ready
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1

zeebe-gateway:
  replicas: 2
  image:
    repository: camunda/zeebe
    tag:
  podAnnotations: { }
  podLabels: { }

  logLevel: info
  log4j2: ''
  javaOpts: >-
    -XX:+ExitOnOutOfMemoryError
  env: [ ]
  configMap:
    defaultMode: 0744
  command: []

  containerSecurityContext: { }
  podDisruptionBudget:
    enabled: false
    minAvailable: 1
    maxUnavailable:

  resources:
    requests:
      cpu: 400m
      memory: 450Mi
    limits:
      cpu: 400m
      memory: 450Mi

  priorityClassName: ""
  nodeSelector: { }
  tolerations: [ ]
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: "app.kubernetes.io/component"
                operator: In
                values:
                  - zeebe-gateway
          topologyKey: "kubernetes.io/hostname"

  extraVolumeMounts: [ ]
  extraVolumes: [ ]
  extraInitContainers: [ ]

  service:
    type: NodePort
    loadBalancerIP: ""
    loadBalancerSourceRanges: [ ]
    httpPort: 9600
    httpName: "http"
    gatewayPort: 26500
    gatewayName: "gateway"
    internalPort: 26502
    internalName: "internal"
    annotations: {}

  serviceAccount:
    enabled: true
    name: ""
    annotations: { }

operate:
  enabled: true

  image:
    repository: camunda/operate
    tag:

  podLabels: { }
  
  logging:
    level:
      ROOT: INFO
      org.camunda.operate: DEBUG

  service:
    type: NodePort
    port: 80
    annotations: {}

  resources:
    requests:
      cpu: 600m
      memory: 400Mi
    limits:
      cpu: 2000m
      memory: 2Gi

  env: []
  configMap:
    defaultMode: 0744
  command: []
  extraVolumes: []
  extraVolumeMounts: []

  serviceAccount:
    enabled: true
    name: ""
    annotations: { }

  ingress:
    enabled: true
    className: alb
    annotations:
      ingress.kubernetes.io/rewrite-target: "/"
      nginx.ingress.kubernetes.io/ssl-redirect: "false"
      alb.ingress.kubernetes.io/success-codes: 200,302
    path: /
    host: "operate.mydomain.com"
    tls:
      enabled: false
      secretName: ""

  podSecurityContext: {}

  nodeSelector: { }
  tolerations: [ ]
  affinity: { }

tasklist:
  enabled: true

  image:
    repository: camunda/tasklist
    tag:

  env: [ ]

  podLabels: { }

  configMap:
    defaultMode: 0744
  command: []
  service:
    type: NodePort
    port: 80

  graphqlPlaygroundEnabled: ""
  graphqlPlaygroundRequestCredentials: ""

  podSecurityContext: {}

  nodeSelector: { }
  tolerations: [ ]
  affinity: { }

  resources:
    requests:
      cpu: 400m
      memory: 1Gi
    limits:
      cpu: 1000m
      memory: 2Gi

  ingress:
    enabled: true
    className: alb
    annotations:
      ingress.kubernetes.io/rewrite-target: "/"
      nginx.ingress.kubernetes.io/ssl-redirect: "false"
      alb.ingress.kubernetes.io/success-codes: 200,302
    path: /
    host: "tasklist.mydomain.com"

optimize:
  # Enabled if true, the Optimize deployment and its related resources are deployed via a helm release
  enabled: true

  image:
    repository: camunda/optimize
    tag: 3.8.0

  podLabels: { }

  partitionCount: "3"
  env: []
  command: []
  extraVolumes: []
  extraVolumeMounts: []

  serviceAccount:
    enabled: true
    name: ""
    annotations: { }

  service:
    type: NodePort
    port: 80
    annotations: {}

  podSecurityContext: {}

  nodeSelector: { }
  tolerations: [ ]
  affinity: { }

  resources:
    requests:
      cpu: 600m
      memory: 1Gi
    limits:
      cpu: 2000m
      memory: 2Gi

  ingress:
    enabled: true
    className: alb
    annotations:
      ingress.kubernetes.io/rewrite-target: "/"
      nginx.ingress.kubernetes.io/ssl-redirect: "false"
      alb.ingress.kubernetes.io/success-codes: 200,302
    path: /
    host: "optimize.mydomain.com"

retentionPolicy:
  enabled: false
  schedule: "0 0 * * *"
  zeebeIndexTTL: 1
  zeebeIndexMaxSize:
  operateIndexTTL: 30
  tasklistIndexTTL: 30

  image:
    repository: bitnami/elasticsearch-curator
    tag: 5.8.4

prometheusServiceMonitor:
  enabled: false
  labels:
    release: metrics
  scrapeInterval: 10s

identity:
  enabled: true

  firstUser:
    username: demo
    password: demo

  image:
    repository: camunda/identity
    tag:

  service:
    type: NodePort
    port: 80
    annotations: {}

  resources:
    requests:
      cpu: 600m
      memory: 400Mi
    limits:
      cpu: 2000m
      memory: 2Gi

  env: []
  command: []
  extraVolumes: []
  extraVolumeMounts: []

  keycloak:
    service:
      type: ClusterIP
    auth:
      adminUser: admin
      existingSecret: ""

  serviceAccount:
    enabled: true
    name: ""
    annotations: { }

  ingress:
    enabled: true
    className: alb
    annotations:
      ingress.kubernetes.io/rewrite-target: "/"
      nginx.ingress.kubernetes.io/ssl-redirect: "false"
      alb.ingress.kubernetes.io/success-codes: 200,302
    path: /
    host: "identity.mydomain.com"
    tls:
      enabled: false
      secretName: ""

  podSecurityContext: {}

elasticsearch:
  enabled: true
  extraEnvs:
    - name: "xpack.security.enabled"
      value: "false"

  replicas: 2

  volumeClaimTemplate:
    accessModes: [ "ReadWriteOnce" ]
    resources:
      requests:
        storage: 64Gi

  esJavaOpts: "-Xmx1g -Xms1g"

  resources:
    requests:
      cpu: 1
      memory: 1Gi
    limits:
      cpu: 2
      memory: 2Gi

Hey @bjobie

thanks for trying Camunda Platform and raising this!

The issue you’re facing is that Identity needs to connect to Keycloak. Identity is just a web facade in front of Keycloak to make it easier to manage Web application access and roles. When you set publicIssuerUrl, you set it for all web apps including Identity, which will not work.

You need to configure an ingress for Keycloak as well, and set global.identity.auth.publicIssuerUrl to that Keycloak ingress URL — not to Identity’s.

Hope that helps.

Greets
Chris

(Edit: I think the issue you’re facing is that publicIssuerUrl points at Identity instead of Keycloak.)

Hey thanks. That makes sense.
It doesn’t appear there is an ingress section for keycloak in the values.yaml file. But I do see the keycloak subsection under the identity section. I’m just learning Helm/K8s, do I need to create a new ‘keycloak’ section? I don’t suppose you have an example or template somewhere I could look at?

Hey @bjobie

sorry I have no example. But you can check this charts/bitnami/keycloak at master · bitnami/charts · GitHub for more details about how to configure keycloak. Be aware that keycloak is just a dependency/subchart. This means everything under keycloak “section” or object applies to that chart.

Hope this makes sense

Greets
Chris

Well I made progress, when I go to http://operate.mydomain.com it redirects to http://keycloak.mydomain.com/auth/realms/camunda-platform/protocol/openid-connect/auth?client_id=operate&redirect_uri=http%3A%2F%2Foperate.mydomain.com%2Fidentity-callback&response_type=code&scope=openid+email&state= BUT I get a 404 error.

Added this to my helm chart under the identity subchart:

  keycloak:
    service:
      type: NodePort
    ingress:
      enabled: true
      ingressClassName: alb
      hostname: keycloak.mydomain.com
      annotations:
        alb.ingress.kubernetes.io/success-codes: 200,302

Any other tips?

Hi,

you have to provide the alb.ingress.kubernetes.io/group.name annotation in every ingress.annotations group with the same value:

# [...]
identity:
  keycloak:
    service:
      type: "NodePort"
    ingress:
      hostname: "key.my.domain.com"
      path: "/*"
      enabled: true
      ingressClassName: "alb"
      annotations:
        alb.ingress.kubernetes.io/group.name: "camunda-lb"
        alb.ingress.kubernetes.io/success-codes: "200"
        alb.ingress.kubernetes.io/scheme: internet-facing
  service:
    type: "NodePort"
  ingress:
    host: "identity.my.domain.com"
    enabled: true
    className: "alb"
    annotations:
      alb.ingress.kubernetes.io/group.name: "camunda-lb"
      alb.ingress.kubernetes.io/success-codes: "302"
      alb.ingress.kubernetes.io/scheme: internet-facing

operate:
  service:
    type: "NodePort"
  ingress:
    host: "operate.my.domain.com"
    enabled: true
    className: "alb"
    annotations:
      alb.ingress.kubernetes.io/group.name: "camunda-lb"
      alb.ingress.kubernetes.io/success-codes: "302"
      alb.ingress.kubernetes.io/scheme: internet-facing
# [...]

Now you should get only one Application LoadBalancer which targets three (or more) TargetGroups (every ingress resource creates one TargetGroup). After that you have to create an A record that points to the single Application LoadBalancer in Route53:
Record name: *
Route traffic to: Alias to Application and Classic Load Balancer (Enable Alias Switch)