From 203d053aaca334e7b819602306c38cd4d2ce8bf1 Mon Sep 17 00:00:00 2001
From: Michael Heap
Date: Thu, 28 Mar 2024 13:56:46 +0000
Subject: [PATCH] Remove KIC versions below 2.5

---
 app/_data/docs_nav_kic_1.0.x.yml | 114 ---
 app/_data/docs_nav_kic_1.1.x.yml | 114 ---
 app/_data/docs_nav_kic_1.2.x.yml | 118 ---
 app/_data/docs_nav_kic_1.3.x.yml | 118 ---
 app/_data/docs_nav_kic_2.0.x.yml | 126 ---
 app/_data/docs_nav_kic_2.1.x.yml | 126 ---
 app/_data/docs_nav_kic_2.2.x.yml | 130 ---
 app/_data/kong_versions.yml | 27 -
 .../1.0.x/concepts/custom-resources.md | 131 ---
 .../1.0.x/concepts/deployment.md | 302 -------
 .../1.0.x/concepts/design.md | 64 --
 .../1.0.x/concepts/ha-and-scaling.md | 69 --
 .../1.0.x/concepts/ingress-classes.md | 177 ----
 .../1.0.x/concepts/ingress-versions.md | 172 ----
 .../concepts/k4k8s-with-kong-enterprise.md | 43 -
 .../1.0.x/concepts/security.md | 67 --
 .../1.0.x/deployment/admission-webhook.md | 211 -----
 .../1.0.x/deployment/aks.md | 79 --
 .../1.0.x/deployment/eks.md | 93 ---
 .../1.0.x/deployment/gke.md | 131 ---
 .../1.0.x/deployment/k4k8s-enterprise.md | 152 ----
 .../1.0.x/deployment/k4k8s.md | 78 --
 .../1.0.x/deployment/kong-enterprise.md | 138 ----
 .../1.0.x/deployment/minikube.md | 77 --
 .../1.0.x/deployment/overview.md | 65 --
 .../1.0.x/faq.md | 25 -
 .../1.0.x/guides/cert-manager.md | 372 ---------
 .../1.0.x/guides/configure-acl-plugin.md | 755 -----------------
 .../guides/configuring-custom-entities.md | 180 ----
 .../guides/configuring-fallback-service.md | 182 -----
 .../1.0.x/guides/configuring-health-checks.md | 353 --------
 .../guides/configuring-https-redirect.md | 148 ----
 .../1.0.x/guides/getting-started-istio.md | 217 -----
 .../1.0.x/guides/getting-started.md | 271 ------
 .../1.0.x/guides/overview.md | 74 --
 .../1.0.x/guides/preserve-client-ip.md | 111 ---
 .../1.0.x/guides/prometheus-grafana.md | 343 --------
 .../1.0.x/guides/redis-rate-limiting.md | 222 -----
 .../1.0.x/guides/setting-up-custom-plugins.md | 212 -----
 .../1.0.x/guides/upstream-mtls.md | 104 ---
 .../using-consumer-credential-resource.md | 227 -----
 .../1.0.x/guides/using-external-service.md | 83 --
 .../1.0.x/guides/using-ingress-with-grpc.md | 90 --
 .../1.0.x/guides/using-kong-with-knative.md | 237 ------
 .../using-kongclusterplugin-resource.md | 255 ------
 .../guides/using-kongingress-resource.md | 251 ------
 .../1.0.x/guides/using-kongplugin-resource.md | 469 -----------
 .../1.0.x/guides/using-mtls-auth-plugin.md | 320 --------
 .../1.0.x/guides/using-oidc-plugin.md | 150 ----
 .../1.0.x/guides/using-rewrites.md | 199 -----
 .../1.0.x/guides/using-tcpingress.md | 254 ------
 .../1.0.x/index.md | 91 ---
 .../1.0.x/references/annotations.md | 428 ----------
 .../1.0.x/references/cli-arguments.md | 70 --
 .../1.0.x/references/custom-resources.md | 419 ----------
 .../1.0.x/references/plugin-compatibility.md | 112 ---
 .../1.0.x/references/version-compatibility.md | 72 --
 .../1.0.x/troubleshooting.md | 182 -----
 .../1.1.x/concepts/custom-resources.md | 131 ---
 .../1.1.x/concepts/deployment.md | 302 -------
 .../1.1.x/concepts/design.md | 64 --
 .../1.1.x/concepts/ha-and-scaling.md | 69 --
 .../1.1.x/concepts/ingress-classes.md | 177 ----
 .../1.1.x/concepts/ingress-versions.md | 170 ----
 .../concepts/k4k8s-with-kong-enterprise.md | 43 -
 .../1.1.x/concepts/security.md | 67 --
 .../1.1.x/deployment/admission-webhook.md | 211 -----
 .../1.1.x/deployment/aks.md | 76 --
 .../1.1.x/deployment/eks.md | 90 --
 .../1.1.x/deployment/gke.md | 128 ---
 .../1.1.x/deployment/k4k8s-enterprise.md | 147 ----
.../1.1.x/deployment/k4k8s.md | 76 -- .../1.1.x/deployment/kong-enterprise.md | 138 ---- .../1.1.x/deployment/minikube.md | 74 -- .../1.1.x/deployment/overview.md | 65 -- .../1.1.x/faq.md | 25 - .../1.1.x/guides/cert-manager.md | 372 --------- .../1.1.x/guides/configure-acl-plugin.md | 755 ----------------- .../guides/configuring-custom-entities.md | 180 ---- .../guides/configuring-fallback-service.md | 182 ----- .../1.1.x/guides/configuring-health-checks.md | 353 -------- .../guides/configuring-https-redirect.md | 148 ---- .../1.1.x/guides/getting-started-istio.md | 217 ----- .../1.1.x/guides/getting-started.md | 271 ------ .../1.1.x/guides/overview.md | 74 -- .../1.1.x/guides/preserve-client-ip.md | 111 --- .../1.1.x/guides/prometheus-grafana.md | 338 -------- .../1.1.x/guides/redis-rate-limiting.md | 222 ----- .../1.1.x/guides/setting-up-custom-plugins.md | 213 ----- .../1.1.x/guides/upstream-mtls.md | 104 --- .../using-consumer-credential-resource.md | 227 ----- .../1.1.x/guides/using-external-service.md | 83 -- .../1.1.x/guides/using-ingress-with-grpc.md | 90 -- .../1.1.x/guides/using-kong-with-knative.md | 237 ------ .../using-kongclusterplugin-resource.md | 255 ------ .../guides/using-kongingress-resource.md | 251 ------ .../1.1.x/guides/using-kongplugin-resource.md | 469 ----------- .../1.1.x/guides/using-mtls-auth-plugin.md | 320 -------- .../1.1.x/guides/using-oidc-plugin.md | 150 ---- .../1.1.x/guides/using-rewrites.md | 199 ----- .../1.1.x/guides/using-tcpingress.md | 254 ------ .../1.1.x/index.md | 91 --- .../1.1.x/references/annotations.md | 445 ---------- .../1.1.x/references/cli-arguments.md | 70 -- .../1.1.x/references/custom-resources.md | 419 ---------- .../1.1.x/references/plugin-compatibility.md | 112 --- .../1.1.x/references/version-compatibility.md | 74 -- .../1.1.x/troubleshooting.md | 182 ----- .../1.2.x/concepts/custom-resources.md | 131 --- .../1.2.x/concepts/deployment.md | 302 ------- .../1.2.x/concepts/design.md | 64 -- .../1.2.x/concepts/ha-and-scaling.md | 69 -- .../1.2.x/concepts/ingress-classes.md | 177 ---- .../1.2.x/concepts/ingress-versions.md | 170 ---- .../concepts/k4k8s-with-kong-enterprise.md | 43 - .../1.2.x/concepts/security.md | 67 -- .../1.2.x/deployment/admission-webhook.md | 211 ----- .../1.2.x/deployment/aks.md | 76 -- .../1.2.x/deployment/eks.md | 90 -- .../1.2.x/deployment/gke.md | 128 --- .../1.2.x/deployment/k4k8s-enterprise.md | 147 ---- .../1.2.x/deployment/k4k8s.md | 76 -- .../1.2.x/deployment/kong-enterprise.md | 138 ---- .../1.2.x/deployment/minikube.md | 74 -- .../1.2.x/deployment/overview.md | 65 -- .../1.2.x/faq.md | 25 - .../1.2.x/guides/cert-manager.md | 372 --------- .../1.2.x/guides/configure-acl-plugin.md | 755 ----------------- .../guides/configuring-custom-entities.md | 180 ---- .../guides/configuring-fallback-service.md | 182 ----- .../1.2.x/guides/configuring-health-checks.md | 353 -------- .../guides/configuring-https-redirect.md | 148 ---- .../1.2.x/guides/getting-started-istio.md | 217 ----- .../1.2.x/guides/getting-started.md | 271 ------ .../1.2.x/guides/overview.md | 74 -- .../1.2.x/guides/preserve-client-ip.md | 111 --- .../1.2.x/guides/prometheus-grafana.md | 338 -------- .../1.2.x/guides/redis-rate-limiting.md | 222 ----- .../1.2.x/guides/setting-up-custom-plugins.md | 213 ----- .../1.2.x/guides/upstream-mtls.md | 104 --- .../using-consumer-credential-resource.md | 227 ----- .../1.2.x/guides/using-external-service.md | 83 -- .../1.2.x/guides/using-ingress-with-grpc.md | 90 -- 
.../1.2.x/guides/using-kong-with-knative.md | 237 ------ .../using-kongclusterplugin-resource.md | 255 ------ .../guides/using-kongingress-resource.md | 251 ------ .../1.2.x/guides/using-kongplugin-resource.md | 469 ----------- .../1.2.x/guides/using-mtls-auth-plugin.md | 320 -------- .../1.2.x/guides/using-oidc-plugin.md | 150 ---- .../1.2.x/guides/using-rewrites.md | 199 ----- .../1.2.x/guides/using-tcpingress.md | 254 ------ .../1.2.x/index.md | 92 --- .../1.2.x/references/annotations.md | 473 ----------- .../1.2.x/references/cli-arguments.md | 71 -- .../1.2.x/references/custom-resources.md | 419 ---------- .../1.2.x/references/plugin-compatibility.md | 112 --- .../1.2.x/references/version-compatibility.md | 74 -- .../1.2.x/troubleshooting.md | 221 ----- .../1.3.x/concepts/custom-resources.md | 131 --- .../1.3.x/concepts/deployment.md | 302 ------- .../1.3.x/concepts/design.md | 64 -- .../1.3.x/concepts/ha-and-scaling.md | 69 -- .../1.3.x/concepts/ingress-classes.md | 177 ---- .../1.3.x/concepts/ingress-versions.md | 171 ---- .../concepts/k4k8s-with-kong-enterprise.md | 43 - .../1.3.x/concepts/security.md | 67 -- .../1.3.x/deployment/admission-webhook.md | 211 ----- .../1.3.x/deployment/aks.md | 76 -- .../1.3.x/deployment/eks.md | 90 -- .../1.3.x/deployment/gke.md | 128 --- .../1.3.x/deployment/k4k8s-enterprise.md | 147 ---- .../1.3.x/deployment/k4k8s.md | 76 -- .../1.3.x/deployment/kong-enterprise.md | 138 ---- .../1.3.x/deployment/minikube.md | 74 -- .../1.3.x/deployment/overview.md | 65 -- .../1.3.x/faq.md | 25 - .../1.3.x/guides/cert-manager.md | 372 --------- .../1.3.x/guides/configure-acl-plugin.md | 755 ----------------- .../guides/configuring-custom-entities.md | 180 ---- .../guides/configuring-fallback-service.md | 182 ----- .../1.3.x/guides/configuring-health-checks.md | 353 -------- .../guides/configuring-https-redirect.md | 148 ---- .../1.3.x/guides/getting-started-istio.md | 219 ----- .../1.3.x/guides/getting-started.md | 271 ------ .../1.3.x/guides/overview.md | 74 -- .../1.3.x/guides/preserve-client-ip.md | 111 --- .../1.3.x/guides/prometheus-grafana.md | 338 -------- .../1.3.x/guides/redis-rate-limiting.md | 222 ----- .../1.3.x/guides/setting-up-custom-plugins.md | 213 ----- .../1.3.x/guides/upstream-mtls.md | 104 --- .../using-consumer-credential-resource.md | 227 ----- .../1.3.x/guides/using-external-service.md | 83 -- .../1.3.x/guides/using-ingress-with-grpc.md | 89 -- .../1.3.x/guides/using-kong-with-knative.md | 237 ------ .../using-kongclusterplugin-resource.md | 255 ------ .../guides/using-kongingress-resource.md | 251 ------ .../1.3.x/guides/using-kongplugin-resource.md | 469 ----------- .../1.3.x/guides/using-mtls-auth-plugin.md | 321 -------- .../1.3.x/guides/using-oidc-plugin.md | 150 ---- .../1.3.x/guides/using-rewrites.md | 199 ----- .../1.3.x/guides/using-tcpingress.md | 254 ------ .../1.3.x/index.md | 92 --- .../1.3.x/references/annotations.md | 529 ------------ .../1.3.x/references/cli-arguments.md | 71 -- .../1.3.x/references/custom-resources.md | 419 ---------- .../1.3.x/references/plugin-compatibility.md | 112 --- .../1.3.x/references/version-compatibility.md | 64 -- .../1.3.x/troubleshooting.md | 289 ------- .../2.0.x/concepts/custom-resources.md | 134 --- .../2.0.x/concepts/deployment.md | 306 ------- .../2.0.x/concepts/design.md | 64 -- .../2.0.x/concepts/ha-and-scaling.md | 69 -- .../2.0.x/concepts/ingress-classes.md | 134 --- .../2.0.x/concepts/ingress-versions.md | 172 ---- .../concepts/k4k8s-with-kong-enterprise.md | 43 - 
.../2.0.x/concepts/security.md | 67 -- .../2.0.x/deployment/admission-webhook.md | 211 ----- .../2.0.x/deployment/aks.md | 76 -- .../2.0.x/deployment/eks.md | 90 -- .../2.0.x/deployment/gke.md | 128 --- .../2.0.x/deployment/k4k8s-enterprise.md | 147 ---- .../2.0.x/deployment/k4k8s.md | 76 -- .../2.0.x/deployment/kong-enterprise.md | 138 ---- .../2.0.x/deployment/minikube.md | 74 -- .../2.0.x/deployment/overview.md | 65 -- .../2.0.x/examples/001_multiple-services.yaml | 88 -- .../2.0.x/faq.md | 25 - .../2.0.x/guides/cert-manager.md | 377 --------- .../2.0.x/guides/configure-acl-plugin.md | 773 ------------------ .../guides/configuring-fallback-service.md | 187 ----- .../2.0.x/guides/configuring-health-checks.md | 356 -------- .../guides/configuring-https-redirect.md | 151 ---- .../2.0.x/guides/getting-started-istio.md | 521 ------------ .../2.0.x/guides/getting-started.md | 276 ------- .../2.0.x/guides/overview.md | 74 -- .../2.0.x/guides/preserve-client-ip.md | 111 --- .../2.0.x/guides/prometheus-grafana.md | 346 -------- .../2.0.x/guides/redis-rate-limiting.md | 225 ----- .../2.0.x/guides/setting-up-custom-plugins.md | 213 ----- .../2.0.x/guides/upgrade.md | 296 ------- .../2.0.x/guides/upstream-mtls.md | 104 --- .../using-consumer-credential-resource.md | 229 ------ .../2.0.x/guides/using-external-service.md | 85 -- .../2.0.x/guides/using-ingress-with-grpc.md | 88 -- .../2.0.x/guides/using-kong-with-knative.md | 234 ------ .../using-kongclusterplugin-resource.md | 255 ------ .../guides/using-kongingress-resource.md | 251 ------ .../2.0.x/guides/using-kongplugin-resource.md | 464 ----------- .../2.0.x/guides/using-mtls-auth-plugin.md | 320 -------- .../2.0.x/guides/using-oidc-plugin.md | 150 ---- .../2.0.x/guides/using-rewrites.md | 200 ----- .../2.0.x/guides/using-tcpingress.md | 254 ------ .../2.0.x/guides/using-udpingress.md | 353 -------- .../2.0.x/index.md | 93 --- .../2.0.x/references/annotations.md | 537 ------------ .../2.0.x/references/cli-arguments.md | 78 -- .../2.0.x/references/custom-resources.md | 419 ---------- .../2.0.x/references/plugin-compatibility.md | 106 --- .../2.0.x/references/prometheus.md | 26 - .../2.0.x/references/version-compatibility.md | 78 -- .../2.0.x/troubleshooting.md | 284 ------- .../2.1.x/concepts/custom-resources.md | 134 --- .../2.1.x/concepts/deployment.md | 306 ------- .../2.1.x/concepts/design.md | 64 -- .../2.1.x/concepts/ha-and-scaling.md | 66 -- .../2.1.x/concepts/ingress-classes.md | 134 --- .../2.1.x/concepts/ingress-versions.md | 172 ---- .../concepts/k4k8s-with-kong-enterprise.md | 43 - .../2.1.x/concepts/security.md | 67 -- .../2.1.x/deployment/admission-webhook.md | 216 ----- .../2.1.x/deployment/aks.md | 76 -- .../2.1.x/deployment/eks.md | 90 -- .../2.1.x/deployment/gke.md | 128 --- .../2.1.x/deployment/k4k8s-enterprise.md | 147 ---- .../2.1.x/deployment/k4k8s.md | 76 -- .../2.1.x/deployment/kong-enterprise.md | 138 ---- .../2.1.x/deployment/minikube.md | 74 -- .../2.1.x/deployment/overview.md | 65 -- .../2.1.x/examples/001_multiple-services.yaml | 88 -- .../2.1.x/faq.md | 25 - .../2.1.x/guides/cert-manager.md | 381 --------- .../2.1.x/guides/configure-acl-plugin.md | 768 ----------------- .../guides/configuring-fallback-service.md | 187 ----- .../2.1.x/guides/configuring-health-checks.md | 356 -------- .../guides/configuring-https-redirect.md | 151 ---- .../2.1.x/guides/getting-started-istio.md | 521 ------------ .../2.1.x/guides/getting-started.md | 308 ------- .../2.1.x/guides/overview.md | 74 -- 
.../2.1.x/guides/preserve-client-ip.md | 111 --- .../2.1.x/guides/prometheus-grafana.md | 346 -------- .../2.1.x/guides/redis-rate-limiting.md | 225 ----- .../2.1.x/guides/setting-up-custom-plugins.md | 213 ----- .../2.1.x/guides/upgrade.md | 345 -------- .../2.1.x/guides/upstream-mtls.md | 104 --- .../using-consumer-credential-resource.md | 229 ------ .../2.1.x/guides/using-external-service.md | 85 -- .../2.1.x/guides/using-ingress-with-grpc.md | 88 -- .../2.1.x/guides/using-kong-with-knative.md | 234 ------ .../using-kongclusterplugin-resource.md | 255 ------ .../guides/using-kongingress-resource.md | 251 ------ .../2.1.x/guides/using-kongplugin-resource.md | 464 ----------- .../2.1.x/guides/using-mtls-auth-plugin.md | 320 -------- .../2.1.x/guides/using-oidc-plugin.md | 150 ---- .../2.1.x/guides/using-rewrites.md | 200 ----- .../2.1.x/guides/using-tcpingress.md | 254 ------ .../2.1.x/guides/using-udpingress.md | 353 -------- .../2.1.x/index.md | 93 --- .../2.1.x/references/annotations.md | 537 ------------ .../2.1.x/references/cli-arguments.md | 78 -- .../2.1.x/references/custom-resources.md | 419 ---------- .../2.1.x/references/plugin-compatibility.md | 106 --- .../2.1.x/references/prometheus.md | 26 - .../2.1.x/references/version-compatibility.md | 80 -- .../2.1.x/troubleshooting.md | 284 ------- .../2.2.x/concepts/custom-resources.md | 134 --- .../2.2.x/concepts/deployment.md | 306 ------- .../2.2.x/concepts/design.md | 64 -- .../2.2.x/concepts/ha-and-scaling.md | 66 -- .../2.2.x/concepts/ingress-classes.md | 134 --- .../2.2.x/concepts/ingress-versions.md | 172 ---- .../concepts/k4k8s-with-kong-enterprise.md | 43 - .../2.2.x/concepts/security.md | 67 -- .../2.2.x/deployment/admission-webhook.md | 215 ----- .../2.2.x/deployment/aks.md | 76 -- .../2.2.x/deployment/eks.md | 90 -- .../2.2.x/deployment/gke.md | 128 --- .../2.2.x/deployment/install-gateway-apis.md | 70 -- .../2.2.x/deployment/k4k8s-enterprise.md | 147 ---- .../2.2.x/deployment/k4k8s.md | 76 -- .../2.2.x/deployment/kong-enterprise.md | 138 ---- .../2.2.x/deployment/minikube.md | 74 -- .../2.2.x/deployment/overview.md | 65 -- .../2.2.x/examples/001_multiple-services.yaml | 88 -- .../2.2.x/faq.md | 25 - .../2.2.x/guides/cert-manager.md | 384 --------- .../2.2.x/guides/configure-acl-plugin.md | 773 ------------------ .../guides/configuring-fallback-service.md | 187 ----- .../2.2.x/guides/configuring-health-checks.md | 356 -------- .../guides/configuring-https-redirect.md | 151 ---- .../2.2.x/guides/getting-started-istio.md | 521 ------------ .../2.2.x/guides/getting-started.md | 308 ------- .../2.2.x/guides/overview.md | 74 -- .../2.2.x/guides/preserve-client-ip.md | 111 --- .../2.2.x/guides/prometheus-grafana.md | 346 -------- .../2.2.x/guides/redis-rate-limiting.md | 225 ----- .../2.2.x/guides/setting-up-custom-plugins.md | 213 ----- .../2.2.x/guides/upgrade.md | 348 -------- .../2.2.x/guides/upstream-mtls.md | 104 --- .../using-consumer-credential-resource.md | 229 ------ .../2.2.x/guides/using-external-service.md | 85 -- .../2.2.x/guides/using-gateway-api.md | 208 ----- .../2.2.x/guides/using-ingress-with-grpc.md | 88 -- .../2.2.x/guides/using-kong-with-knative.md | 234 ------ .../using-kongclusterplugin-resource.md | 255 ------ .../guides/using-kongingress-resource.md | 251 ------ .../2.2.x/guides/using-kongplugin-resource.md | 464 ----------- .../2.2.x/guides/using-mtls-auth-plugin.md | 320 -------- .../2.2.x/guides/using-oidc-plugin.md | 150 ---- .../2.2.x/guides/using-rewrites.md | 200 ----- 
.../2.2.x/guides/using-tcpingress.md | 254 ------ .../2.2.x/guides/using-udpingress.md | 353 -------- .../2.2.x/index.md | 93 --- .../2.2.x/references/annotations.md | 537 ------------ .../2.2.x/references/cli-arguments.md | 80 -- .../2.2.x/references/custom-resources.md | 419 ---------- .../2.2.x/references/plugin-compatibility.md | 106 --- .../2.2.x/references/prometheus.md | 26 - .../2.2.x/references/version-compatibility.md | 80 -- .../2.2.x/troubleshooting.md | 295 ------- 369 files changed, 71550 deletions(-) delete mode 100644 app/_data/docs_nav_kic_1.0.x.yml delete mode 100644 app/_data/docs_nav_kic_1.1.x.yml delete mode 100644 app/_data/docs_nav_kic_1.2.x.yml delete mode 100644 app/_data/docs_nav_kic_1.3.x.yml delete mode 100644 app/_data/docs_nav_kic_2.0.x.yml delete mode 100644 app/_data/docs_nav_kic_2.1.x.yml delete mode 100644 app/_data/docs_nav_kic_2.2.x.yml delete mode 100644 app/kubernetes-ingress-controller/1.0.x/concepts/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/concepts/deployment.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/concepts/design.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/concepts/ha-and-scaling.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/concepts/ingress-classes.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/concepts/ingress-versions.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/concepts/k4k8s-with-kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/concepts/security.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/admission-webhook.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/aks.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/eks.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/gke.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/k4k8s-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/k4k8s.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/minikube.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/deployment/overview.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/faq.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/cert-manager.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/configure-acl-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/configuring-custom-entities.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/configuring-fallback-service.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/configuring-health-checks.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/configuring-https-redirect.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/getting-started-istio.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/getting-started.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/overview.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/preserve-client-ip.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/prometheus-grafana.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/redis-rate-limiting.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/setting-up-custom-plugins.md 
delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/upstream-mtls.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-consumer-credential-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-external-service.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-ingress-with-grpc.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-kong-with-knative.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-kongclusterplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-kongingress-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-kongplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-mtls-auth-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-oidc-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-rewrites.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/guides/using-tcpingress.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/index.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/references/annotations.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/references/cli-arguments.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/references/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/references/plugin-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/references/version-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/1.0.x/troubleshooting.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/concepts/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/concepts/deployment.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/concepts/design.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/concepts/ha-and-scaling.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/concepts/ingress-classes.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/concepts/ingress-versions.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/concepts/k4k8s-with-kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/concepts/security.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/admission-webhook.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/aks.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/eks.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/gke.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/k4k8s-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/k4k8s.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/minikube.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/deployment/overview.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/faq.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/cert-manager.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/configure-acl-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/configuring-custom-entities.md delete mode 100644 
app/kubernetes-ingress-controller/1.1.x/guides/configuring-fallback-service.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/configuring-health-checks.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/configuring-https-redirect.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/getting-started-istio.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/getting-started.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/overview.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/preserve-client-ip.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/prometheus-grafana.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/redis-rate-limiting.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/setting-up-custom-plugins.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/upstream-mtls.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-consumer-credential-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-external-service.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-ingress-with-grpc.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-kong-with-knative.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-kongclusterplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-kongingress-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-kongplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-mtls-auth-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-oidc-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-rewrites.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/guides/using-tcpingress.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/index.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/references/annotations.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/references/cli-arguments.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/references/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/references/plugin-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/references/version-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/1.1.x/troubleshooting.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/concepts/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/concepts/deployment.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/concepts/design.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/concepts/ha-and-scaling.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/concepts/ingress-classes.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/concepts/ingress-versions.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/concepts/k4k8s-with-kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/concepts/security.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/deployment/admission-webhook.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/deployment/aks.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/deployment/eks.md delete mode 100644 
app/kubernetes-ingress-controller/1.2.x/deployment/gke.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/deployment/k4k8s-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/deployment/k4k8s.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/deployment/kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/deployment/minikube.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/deployment/overview.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/faq.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/cert-manager.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/configure-acl-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/configuring-custom-entities.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/configuring-fallback-service.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/configuring-health-checks.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/configuring-https-redirect.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/getting-started-istio.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/getting-started.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/overview.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/preserve-client-ip.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/prometheus-grafana.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/redis-rate-limiting.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/setting-up-custom-plugins.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/upstream-mtls.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-consumer-credential-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-external-service.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-ingress-with-grpc.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-kong-with-knative.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-kongclusterplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-kongingress-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-kongplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-mtls-auth-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-oidc-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-rewrites.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/guides/using-tcpingress.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/index.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/references/annotations.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/references/cli-arguments.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/references/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/references/plugin-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/references/version-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/1.2.x/troubleshooting.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/concepts/custom-resources.md delete mode 100644 
app/kubernetes-ingress-controller/1.3.x/concepts/deployment.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/concepts/design.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/concepts/ha-and-scaling.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/concepts/ingress-classes.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/concepts/ingress-versions.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/concepts/k4k8s-with-kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/concepts/security.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/admission-webhook.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/aks.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/eks.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/gke.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/k4k8s-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/k4k8s.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/minikube.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/deployment/overview.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/faq.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/cert-manager.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/configure-acl-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/configuring-custom-entities.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/configuring-fallback-service.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/configuring-health-checks.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/configuring-https-redirect.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/getting-started-istio.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/getting-started.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/overview.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/preserve-client-ip.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/prometheus-grafana.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/redis-rate-limiting.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/setting-up-custom-plugins.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/upstream-mtls.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-consumer-credential-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-external-service.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-ingress-with-grpc.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-kong-with-knative.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-kongclusterplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-kongingress-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-kongplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-mtls-auth-plugin.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-oidc-plugin.md delete mode 100644 
app/kubernetes-ingress-controller/1.3.x/guides/using-rewrites.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/guides/using-tcpingress.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/index.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/references/annotations.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/references/cli-arguments.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/references/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/references/plugin-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/references/version-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/1.3.x/troubleshooting.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/concepts/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/concepts/deployment.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/concepts/design.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/concepts/ha-and-scaling.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/concepts/ingress-classes.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/concepts/ingress-versions.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/concepts/k4k8s-with-kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/concepts/security.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/admission-webhook.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/aks.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/eks.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/gke.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/k4k8s-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/k4k8s.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/minikube.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/deployment/overview.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/examples/001_multiple-services.yaml delete mode 100644 app/kubernetes-ingress-controller/2.0.x/faq.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/cert-manager.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/configure-acl-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/configuring-fallback-service.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/configuring-health-checks.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/configuring-https-redirect.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/getting-started-istio.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/getting-started.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/overview.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/preserve-client-ip.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/prometheus-grafana.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/redis-rate-limiting.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/setting-up-custom-plugins.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/upgrade.md delete mode 100644 
app/kubernetes-ingress-controller/2.0.x/guides/upstream-mtls.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-consumer-credential-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-external-service.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-ingress-with-grpc.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-kong-with-knative.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-kongclusterplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-kongingress-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-kongplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-mtls-auth-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-oidc-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-rewrites.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-tcpingress.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/guides/using-udpingress.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/index.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/references/annotations.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/references/cli-arguments.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/references/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/references/plugin-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/references/prometheus.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/references/version-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/2.0.x/troubleshooting.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/concepts/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/concepts/deployment.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/concepts/design.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/concepts/ha-and-scaling.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/concepts/ingress-classes.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/concepts/ingress-versions.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/concepts/k4k8s-with-kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/concepts/security.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/admission-webhook.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/aks.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/eks.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/gke.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/k4k8s-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/k4k8s.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/minikube.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/deployment/overview.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/examples/001_multiple-services.yaml delete mode 100644 app/kubernetes-ingress-controller/2.1.x/faq.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/cert-manager.md delete mode 100644 
app/kubernetes-ingress-controller/2.1.x/guides/configure-acl-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/configuring-fallback-service.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/configuring-health-checks.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/configuring-https-redirect.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/getting-started-istio.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/getting-started.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/overview.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/preserve-client-ip.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/prometheus-grafana.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/redis-rate-limiting.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/setting-up-custom-plugins.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/upgrade.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/upstream-mtls.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-consumer-credential-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-external-service.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-ingress-with-grpc.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-kong-with-knative.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-kongclusterplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-kongingress-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-kongplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-mtls-auth-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-oidc-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-rewrites.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-tcpingress.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/guides/using-udpingress.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/index.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/references/annotations.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/references/cli-arguments.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/references/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/references/plugin-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/references/prometheus.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/references/version-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/2.1.x/troubleshooting.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/concepts/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/concepts/deployment.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/concepts/design.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/concepts/ha-and-scaling.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/concepts/ingress-classes.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/concepts/ingress-versions.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/concepts/k4k8s-with-kong-enterprise.md delete mode 100644 
app/kubernetes-ingress-controller/2.2.x/concepts/security.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/admission-webhook.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/aks.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/eks.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/gke.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/install-gateway-apis.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/k4k8s-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/k4k8s.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/kong-enterprise.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/minikube.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/deployment/overview.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/examples/001_multiple-services.yaml delete mode 100644 app/kubernetes-ingress-controller/2.2.x/faq.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/cert-manager.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/configure-acl-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/configuring-fallback-service.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/configuring-health-checks.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/configuring-https-redirect.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/getting-started-istio.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/getting-started.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/overview.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/preserve-client-ip.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/prometheus-grafana.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/redis-rate-limiting.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/setting-up-custom-plugins.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/upgrade.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/upstream-mtls.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-consumer-credential-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-external-service.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-gateway-api.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-ingress-with-grpc.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-kong-with-knative.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-kongclusterplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-kongingress-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-kongplugin-resource.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-mtls-auth-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-oidc-plugin.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-rewrites.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-tcpingress.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/guides/using-udpingress.md delete mode 100644 
app/kubernetes-ingress-controller/2.2.x/index.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/references/annotations.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/references/cli-arguments.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/references/custom-resources.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/references/plugin-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/references/prometheus.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/references/version-compatibility.md delete mode 100644 app/kubernetes-ingress-controller/2.2.x/troubleshooting.md diff --git a/app/_data/docs_nav_kic_1.0.x.yml b/app/_data/docs_nav_kic_1.0.x.yml deleted file mode 100644 index b17ef1eb1fe2..000000000000 --- a/app/_data/docs_nav_kic_1.0.x.yml +++ /dev/null @@ -1,114 +0,0 @@ -- title: Kong Ingress Controller - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /kubernetes-ingress-controller/1.0.x/ - absolute_url: true - items: - - text: FAQ - url: /faq - - text: Changelog - url: https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md - absolute_url: true - - text: Concepts - items: - - text: Architecture - url: /concepts/design - - text: Custom Resources - url: /concepts/custom-resources - - text: Deployment Methods - url: /concepts/deployment - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /concepts/k4k8s-with-kong-enterprise - - text: High-Availability and Scaling - url: /concepts/ha-and-scaling - - text: Resource Classes - url: /concepts/ingress-classes - - text: Security - url: /concepts/security - - text: Ingress Resource API Versions - url: /concepts/ingress-versions - - text: Deployment - items: - - text: Overview - url: /deployment/overview - - text: Kong Ingress on Minikube - url: /deployment/minikube - - text: Kong for Kubernetes - url: /deployment/k4k8s - - text: Kong for Kubernetes Enterprise - url: /deployment/k4k8s-enterprise - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /deployment/kong-enterprise - - text: Kong Ingress on AKS - url: /deployment/aks - - text: Kong Ingress on EKS - url: /deployment/eks - - text: Kong Ingress on GKE - url: /deployment/gke - - text: Admission Controller - url: /deployment/admission-webhook - - text: Guides - items: - - text: Overview - url: /guides/overview - - text: Getting Started with KIC - url: /guides/getting-started - - text: Getting Started using Istio - url: /guides/getting-started-istio - - text: Using Custom Resources - items: - - text: Using the KongPlugin Resource - url: /guides/using-kongplugin-resource - - text: Using the KongIngress Resource - url: /guides/using-kongingress-resource - - text: Using KongConsumer and Credential Resources - url: /guides/using-consumer-credential-resource - - text: Using the KongClusterPlugin Resource - url: /guides/using-kongclusterplugin-resource - - text: Using the ACL and JWT Plugins - url: /guides/configure-acl-plugin - - text: Using cert-manager with Kong - url: /guides/cert-manager - - text: Configuring a Fallback Service - url: /guides/configuring-fallback-service - - text: Using an External Service - url: /guides/using-external-service - - text: Configuring HTTPS Redirects for Services - url: /guides/configuring-https-redirect - - text: Using Redis for Rate Limiting - url: /guides/redis-rate-limiting - - text: Integrate KIC with Prometheus/Grafana - url: /guides/prometheus-grafana - - text: Configuring Circuit-Breaker and 
Health-Checking - url: /guides/configuring-health-checks - - text: Setting up a Custom Plugin - url: /guides/setting-up-custom-plugins - - text: Using Ingress with gRPC - url: /guides/using-ingress-with-grpc - - text: Setting up Upstream mTLS - url: /guides/upstream-mtls - - text: Exposing a TCP-based Service - url: /guides/using-tcpingress - - text: Using the mTLS Auth Plugin - url: /guides/using-mtls-auth-plugin - - text: Configuring Custom Entities - url: /guides/configuring-custom-entities - - text: Using the OpenID Connect Plugin - url: /guides/using-oidc-plugin - - text: Rewriting Hosts and Paths - url: /guides/using-rewrites - - text: Preserving Client IP Address - url: /guides/preserve-client-ip - - text: References - items: - - text: KIC Annotations - url: /references/annotations - - text: CLI Arguments - url: /references/cli-arguments - - text: Custom Resource Definitions - url: /references/custom-resources - - text: Plugin Compatibility - url: /references/plugin-compatibility - - text: Version Compatibility - url: /references/version-compatibility - - text: Troubleshooting - url: /troubleshooting diff --git a/app/_data/docs_nav_kic_1.1.x.yml b/app/_data/docs_nav_kic_1.1.x.yml deleted file mode 100644 index 5a0d375d93c0..000000000000 --- a/app/_data/docs_nav_kic_1.1.x.yml +++ /dev/null @@ -1,114 +0,0 @@ -- title: Kong Ingress Controller - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /kubernetes-ingress-controller/1.1.x/ - absolute_url: true - items: - - text: FAQ - url: /faq - - text: Changelog - url: https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md - absolute_url: true - - text: Concepts - items: - - text: Architecture - url: /concepts/design - - text: Custom Resources - url: /concepts/custom-resources - - text: Deployment Methods - url: /concepts/deployment - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /concepts/k4k8s-with-kong-enterprise - - text: High-Availability and Scaling - url: /concepts/ha-and-scaling - - text: Resource Classes - url: /concepts/ingress-classes - - text: Security - url: /concepts/security - - text: Ingress Resource API Versions - url: /concepts/ingress-versions - - text: Deployment - items: - - text: Overview - url: /deployment/overview - - text: Kong Ingress on Minikube - url: /deployment/minikube - - text: Kong for Kubernetes - url: /deployment/k4k8s - - text: Kong for Kubernetes Enterprise - url: /deployment/k4k8s-enterprise - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /deployment/kong-enterprise - - text: Kong Ingress on AKS - url: /deployment/aks - - text: Kong Ingress on EKS - url: /deployment/eks - - text: Kong Ingress on GKE - url: /deployment/gke - - text: Admission Controller - url: /deployment/admission-webhook - - text: Guides - items: - - text: Overview - url: /guides/overview - - text: Getting Started with KIC - url: /guides/getting-started - - text: Getting Started using Istio - url: /guides/getting-started-istio - - text: Using Custom Resources - items: - - text: Using the KongPlugin Resource - url: /guides/using-kongplugin-resource - - text: Using the KongIngress Resource - url: /guides/using-kongingress-resource - - text: Using KongConsumer and Credential Resources - url: /guides/using-consumer-credential-resource - - text: Using the KongClusterPlugin Resource - url: /guides/using-kongclusterplugin-resource - - text: Using the ACL and JWT Plugins - url: /guides/configure-acl-plugin - - text: Using cert-manager with Kong - url: 
/guides/cert-manager - - text: Configuring a Fallback Service - url: /guides/configuring-fallback-service - - text: Using an External Service - url: /guides/using-external-service - - text: Configuring HTTPS Redirects for Services - url: /guides/configuring-https-redirect - - text: Using Redis for Rate Limiting - url: /guides/redis-rate-limiting - - text: Integrate KIC with Prometheus/Grafana - url: /guides/prometheus-grafana - - text: Configuring Circuit-Breaker and Health-Checking - url: /guides/configuring-health-checks - - text: Setting up a Custom Plugin - url: /guides/setting-up-custom-plugins - - text: Using Ingress with gRPC - url: /guides/using-ingress-with-grpc - - text: Setting up Upstream mTLS - url: /guides/upstream-mtls - - text: Exposing a TCP-based Service - url: /guides/using-tcpingress - - text: Using the mTLS Auth Plugin - url: /guides/using-mtls-auth-plugin - - text: Configuring Custom Entities - url: /guides/configuring-custom-entities - - text: Using the OpenID Connect Plugin - url: /guides/using-oidc-plugin - - text: Rewriting Hosts and Paths - url: /guides/using-rewrites - - text: Preserving Client IP Address - url: /guides/preserve-client-ip - - text: References - items: - - text: KIC Annotations - url: /references/annotations - - text: CLI Arguments - url: /references/cli-arguments - - text: Custom Resource Definitions - url: /references/custom-resources - - text: Plugin Compatibility - url: /references/plugin-compatibility - - text: Version Compatibility - url: /references/version-compatibility - - text: Troubleshooting - url: /troubleshooting diff --git a/app/_data/docs_nav_kic_1.2.x.yml b/app/_data/docs_nav_kic_1.2.x.yml deleted file mode 100644 index e8714f26519c..000000000000 --- a/app/_data/docs_nav_kic_1.2.x.yml +++ /dev/null @@ -1,118 +0,0 @@ -- title: Introduction - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /kubernetes-ingress-controller/1.2.x/ - absolute_url: true - items: - - text: FAQ - url: /faq - - text: Changelog - url: https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md - absolute_url: true - target_blank: true - -- title: Concepts - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: Architecture - url: /concepts/design - - text: Custom Resources - url: /concepts/custom-resources - - text: Deployment Methods - url: /concepts/deployment - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /concepts/k4k8s-with-kong-enterprise - - text: High-Availability and Scaling - url: /concepts/ha-and-scaling - - text: Resource Classes - url: /concepts/ingress-classes - - text: Security - url: /concepts/security - - text: Ingress Resource API Versions - url: /concepts/ingress-versions -- title: Deployment - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /deployment/overview - items: - - text: Kong Ingress on Minikube - url: /deployment/minikube - - text: Kong for Kubernetes - url: /deployment/k4k8s - - text: Kong for Kubernetes Enterprise - url: /deployment/k4k8s-enterprise - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /deployment/kong-enterprise - - text: Kong Ingress on AKS - url: /deployment/aks - - text: Kong Ingress on EKS - url: /deployment/eks - - text: Kong Ingress on GKE - url: /deployment/gke - - text: Admission Controller - url: /deployment/admission-webhook -- title: Guides - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /guides/overview - items: - - text: Getting Started with 
KIC - url: /guides/getting-started - - text: Getting Started using Istio - url: /guides/getting-started-istio - - text: Using Custom Resources - items: - - text: Using the KongPlugin Resource - url: /guides/using-kongplugin-resource - - text: Using the KongIngress Resource - url: /guides/using-kongingress-resource - - text: Using KongConsumer and Credential Resources - url: /guides/using-consumer-credential-resource - - text: Using the KongClusterPlugin Resource - url: /guides/using-kongclusterplugin-resource - - text: Using the ACL and JWT Plugins - url: /guides/configure-acl-plugin - - text: Using cert-manager with Kong - url: /guides/cert-manager - - text: Configuring a Fallback Service - url: /guides/configuring-fallback-service - - text: Using an External Service - url: /guides/using-external-service - - text: Configuring HTTPS Redirects for Services - url: /guides/configuring-https-redirect - - text: Using Redis for Rate Limiting - url: /guides/redis-rate-limiting - - text: Integrate KIC with Prometheus/Grafana - url: /guides/prometheus-grafana - - text: Configuring Circuit-Breaker and Health-Checking - url: /guides/configuring-health-checks - - text: Setting up a Custom Plugin - url: /guides/setting-up-custom-plugins - - text: Using Ingress with gRPC - url: /guides/using-ingress-with-grpc - - text: Setting up Upstream mTLS - url: /guides/upstream-mtls - - text: Exposing a TCP-based Service - url: /guides/using-tcpingress - - text: Using the mTLS Auth Plugin - url: /guides/using-mtls-auth-plugin - - text: Configuring Custom Entities - url: /guides/configuring-custom-entities - - text: Using the OpenID Connect Plugin - url: /guides/using-oidc-plugin - - text: Rewriting Hosts and Paths - url: /guides/using-rewrites - - text: Preserving Client IP Address - url: /guides/preserve-client-ip -- title: References - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: KIC Annotations - url: /references/annotations - - text: CLI Arguments - url: /references/cli-arguments - - text: Custom Resource Definitions - url: /references/custom-resources - - text: Plugin Compatibility - url: /references/plugin-compatibility - - text: Version Compatibility - url: /references/version-compatibility - - text: Troubleshooting - url: /troubleshooting diff --git a/app/_data/docs_nav_kic_1.3.x.yml b/app/_data/docs_nav_kic_1.3.x.yml deleted file mode 100644 index 6f088d5c95ba..000000000000 --- a/app/_data/docs_nav_kic_1.3.x.yml +++ /dev/null @@ -1,118 +0,0 @@ -- title: Introduction - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /kubernetes-ingress-controller/ - absolute_url: true - items: - - text: FAQ - url: /faq - - text: Changelog - url: https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md - absolute_url: true - target_blank: true - -- title: Concepts - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: Architecture - url: /concepts/design - - text: Custom Resources - url: /concepts/custom-resources - - text: Deployment Methods - url: /concepts/deployment - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /concepts/k4k8s-with-kong-enterprise - - text: High-Availability and Scaling - url: /concepts/ha-and-scaling - - text: Resource Classes - url: /concepts/ingress-classes - - text: Security - url: /concepts/security - - text: Ingress Resource API Versions - url: /concepts/ingress-versions -- title: Deployment - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - 
url: /deployment/overview - items: - - text: Kong Ingress on Minikube - url: /deployment/minikube - - text: Kong for Kubernetes - url: /deployment/k4k8s - - text: Kong for Kubernetes Enterprise - url: /deployment/k4k8s-enterprise - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /deployment/kong-enterprise - - text: Kong Ingress on AKS - url: /deployment/aks - - text: Kong Ingress on EKS - url: /deployment/eks - - text: Kong Ingress on GKE - url: /deployment/gke - - text: Admission Controller - url: /deployment/admission-webhook -- title: Guides - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /guides/overview - items: - - text: Getting Started with KIC - url: /guides/getting-started - - text: Getting Started using Istio - url: /guides/getting-started-istio - - text: Using Custom Resources - items: - - text: Using the KongPlugin Resource - url: /guides/using-kongplugin-resource - - text: Using the KongIngress Resource - url: /guides/using-kongingress-resource - - text: Using KongConsumer and Credential Resources - url: /guides/using-consumer-credential-resource - - text: Using the KongClusterPlugin Resource - url: /guides/using-kongclusterplugin-resource - - text: Using the ACL and JWT Plugins - url: /guides/configure-acl-plugin - - text: Using cert-manager with Kong - url: /guides/cert-manager - - text: Configuring a Fallback Service - url: /guides/configuring-fallback-service - - text: Using an External Service - url: /guides/using-external-service - - text: Configuring HTTPS Redirects for Services - url: /guides/configuring-https-redirect - - text: Using Redis for Rate Limiting - url: /guides/redis-rate-limiting - - text: Integrate KIC with Prometheus/Grafana - url: /guides/prometheus-grafana - - text: Configuring Circuit-Breaker and Health-Checking - url: /guides/configuring-health-checks - - text: Setting up a Custom Plugin - url: /guides/setting-up-custom-plugins - - text: Using Ingress with gRPC - url: /guides/using-ingress-with-grpc - - text: Setting up Upstream mTLS - url: /guides/upstream-mtls - - text: Exposing a TCP-based Service - url: /guides/using-tcpingress - - text: Using the mTLS Auth Plugin - url: /guides/using-mtls-auth-plugin - - text: Configuring Custom Entities - url: /guides/configuring-custom-entities - - text: Using the OpenID Connect Plugin - url: /guides/using-oidc-plugin - - text: Rewriting Hosts and Paths - url: /guides/using-rewrites - - text: Preserving Client IP Address - url: /guides/preserve-client-ip -- title: References - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: KIC Annotations - url: /references/annotations - - text: CLI Arguments - url: /references/cli-arguments - - text: Custom Resource Definitions - url: /references/custom-resources - - text: Plugin Compatibility - url: /references/plugin-compatibility - - text: Version Compatibility - url: /references/version-compatibility - - text: Troubleshooting - url: /troubleshooting diff --git a/app/_data/docs_nav_kic_2.0.x.yml b/app/_data/docs_nav_kic_2.0.x.yml deleted file mode 100644 index 16f457eddddf..000000000000 --- a/app/_data/docs_nav_kic_2.0.x.yml +++ /dev/null @@ -1,126 +0,0 @@ -- title: Introduction - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /kubernetes-ingress-controller/ - absolute_url: true - items: - - text: FAQ - url: /faq - - text: Changelog - url: https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md - absolute_url: true - target_blank: true - -- 
title: Concepts - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: Architecture - url: /concepts/design - - text: Custom Resources - url: /concepts/custom-resources - - text: Deployment Methods - url: /concepts/deployment - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /concepts/k4k8s-with-kong-enterprise - - text: High-Availability and Scaling - url: /concepts/ha-and-scaling - - text: Resource Classes - url: /concepts/ingress-classes - - text: Security - url: /concepts/security - - text: Ingress Resource API Versions - url: /concepts/ingress-versions -- title: Deployment - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /deployment/overview - items: - - text: Kong Ingress on Minikube - url: /deployment/minikube - - text: Kong for Kubernetes - url: /deployment/k4k8s - - text: Kong for Kubernetes Enterprise - url: /deployment/k4k8s-enterprise - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /deployment/kong-enterprise - - text: Kong Ingress on AKS - url: /deployment/aks - - text: Kong Ingress on EKS - url: /deployment/eks - - text: Kong Ingress on GKE - url: /deployment/gke - - text: Admission Controller - url: /deployment/admission-webhook -- title: Guides - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /guides/overview - items: - - text: Getting Started with KIC - url: /guides/getting-started - - text: Upgrading from KIC 1.3.x - url: /guides/upgrade - - text: Getting Started using Istio - url: /guides/getting-started-istio - - text: Using Custom Resources - items: - - text: Using the KongPlugin Resource - url: /guides/using-kongplugin-resource - - text: Using the KongIngress Resource - url: /guides/using-kongingress-resource - - text: Using KongConsumer and Credential Resources - url: /guides/using-consumer-credential-resource - - text: Using the KongClusterPlugin Resource - url: /guides/using-kongclusterplugin-resource - - text: Using the TCPIngress Resource - url: /guides/using-tcpingress - - text: Using the UDPIngress Resource - url: /guides/using-udpingress - - text: Using the ACL and JWT Plugins - url: /guides/configure-acl-plugin - - text: Using cert-manager with Kong - url: /guides/cert-manager - - text: Configuring a Fallback Service - url: /guides/configuring-fallback-service - - text: Using an External Service - url: /guides/using-external-service - - text: Configuring HTTPS Redirects for Services - url: /guides/configuring-https-redirect - - text: Using Redis for Rate Limiting - url: /guides/redis-rate-limiting - - text: Integrate KIC with Prometheus/Grafana - url: /guides/prometheus-grafana - - text: Configuring Circuit-Breaker and Health-Checking - url: /guides/configuring-health-checks - - text: Setting up a Custom Plugin - url: /guides/setting-up-custom-plugins - - text: Using Ingress with gRPC - url: /guides/using-ingress-with-grpc - - text: Setting up Upstream mTLS - url: /guides/upstream-mtls - - text: Exposing a TCP-based Service - url: /guides/using-tcpingress - - text: Exposing a UDP-based Service - url: /guides/using-udpingress - - text: Using the mTLS Auth Plugin - url: /guides/using-mtls-auth-plugin - - text: Using the OpenID Connect Plugin - url: /guides/using-oidc-plugin - - text: Rewriting Hosts and Paths - url: /guides/using-rewrites - - text: Preserving Client IP Address - url: /guides/preserve-client-ip -- title: References - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: KIC Annotations - url: 
/references/annotations - - text: CLI Arguments - url: /references/cli-arguments - - text: Custom Resource Definitions - url: /references/custom-resources - - text: Plugin Compatibility - url: /references/plugin-compatibility - - text: Version Compatibility - url: /references/version-compatibility - - text: Troubleshooting - url: /troubleshooting - - text: Prometheus Metrics - url: /references/prometheus diff --git a/app/_data/docs_nav_kic_2.1.x.yml b/app/_data/docs_nav_kic_2.1.x.yml deleted file mode 100644 index 2c2786a62e9f..000000000000 --- a/app/_data/docs_nav_kic_2.1.x.yml +++ /dev/null @@ -1,126 +0,0 @@ -- title: Introduction - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /kubernetes-ingress-controller/ - absolute_url: true - items: - - text: FAQ - url: /faq - - text: Changelog - url: https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md - absolute_url: true - target_blank: true - -- title: Concepts - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: Architecture - url: /concepts/design - - text: Custom Resources - url: /concepts/custom-resources - - text: Deployment Methods - url: /concepts/deployment - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /concepts/k4k8s-with-kong-enterprise - - text: High-Availability and Scaling - url: /concepts/ha-and-scaling - - text: Resource Classes - url: /concepts/ingress-classes - - text: Security - url: /concepts/security - - text: Ingress Resource API Versions - url: /concepts/ingress-versions -- title: Deployment - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /deployment/overview - items: - - text: Kong Ingress on Minikube - url: /deployment/minikube - - text: Kong for Kubernetes - url: /deployment/k4k8s - - text: Kong for Kubernetes Enterprise - url: /deployment/k4k8s-enterprise - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /deployment/kong-enterprise - - text: Kong Ingress on AKS - url: /deployment/aks - - text: Kong Ingress on EKS - url: /deployment/eks - - text: Kong Ingress on GKE - url: /deployment/gke - - text: Admission Controller - url: /deployment/admission-webhook -- title: Guides - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /guides/overview - items: - - text: Getting Started with KIC - url: /guides/getting-started - - text: Upgrading from previous versions - url: /guides/upgrade - - text: Getting Started using Istio - url: /guides/getting-started-istio - - text: Using Custom Resources - items: - - text: Using the KongPlugin Resource - url: /guides/using-kongplugin-resource - - text: Using the KongIngress Resource - url: /guides/using-kongingress-resource - - text: Using KongConsumer and Credential Resources - url: /guides/using-consumer-credential-resource - - text: Using the KongClusterPlugin Resource - url: /guides/using-kongclusterplugin-resource - - text: Using the TCPIngress Resource - url: /guides/using-tcpingress - - text: Using the UDPIngress Resource - url: /guides/using-udpingress - - text: Using the ACL and JWT Plugins - url: /guides/configure-acl-plugin - - text: Using cert-manager with Kong - url: /guides/cert-manager - - text: Configuring a Fallback Service - url: /guides/configuring-fallback-service - - text: Using an External Service - url: /guides/using-external-service - - text: Configuring HTTPS Redirects for Services - url: /guides/configuring-https-redirect - - text: Using Redis for Rate Limiting - url: /guides/redis-rate-limiting - 
- text: Integrate KIC with Prometheus/Grafana - url: /guides/prometheus-grafana - - text: Configuring Circuit-Breaker and Health-Checking - url: /guides/configuring-health-checks - - text: Setting up a Custom Plugin - url: /guides/setting-up-custom-plugins - - text: Using Ingress with gRPC - url: /guides/using-ingress-with-grpc - - text: Setting up Upstream mTLS - url: /guides/upstream-mtls - - text: Exposing a TCP-based Service - url: /guides/using-tcpingress - - text: Exposing a UDP-based Service - url: /guides/using-udpingress - - text: Using the mTLS Auth Plugin - url: /guides/using-mtls-auth-plugin - - text: Using the OpenID Connect Plugin - url: /guides/using-oidc-plugin - - text: Rewriting Hosts and Paths - url: /guides/using-rewrites - - text: Preserving Client IP Address - url: /guides/preserve-client-ip -- title: References - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: KIC Annotations - url: /references/annotations - - text: CLI Arguments - url: /references/cli-arguments - - text: Custom Resource Definitions - url: /references/custom-resources - - text: Plugin Compatibility - url: /references/plugin-compatibility - - text: Version Compatibility - url: /references/version-compatibility - - text: Troubleshooting - url: /troubleshooting - - text: Prometheus Metrics - url: /references/prometheus diff --git a/app/_data/docs_nav_kic_2.2.x.yml b/app/_data/docs_nav_kic_2.2.x.yml deleted file mode 100644 index 88980378db8a..000000000000 --- a/app/_data/docs_nav_kic_2.2.x.yml +++ /dev/null @@ -1,130 +0,0 @@ -- title: Introduction - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /kubernetes-ingress-controller/ - absolute_url: true - items: - - text: FAQ - url: /faq - - text: Changelog - url: https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md - absolute_url: true - target_blank: true - -- title: Concepts - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: Architecture - url: /concepts/design - - text: Custom Resources - url: /concepts/custom-resources - - text: Deployment Methods - url: /concepts/deployment - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /concepts/k4k8s-with-kong-enterprise - - text: High-Availability and Scaling - url: /concepts/ha-and-scaling - - text: Resource Classes - url: /concepts/ingress-classes - - text: Security - url: /concepts/security - - text: Ingress Resource API Versions - url: /concepts/ingress-versions -- title: Deployment - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /deployment/overview - items: - - text: Kong Ingress on Minikube - url: /deployment/minikube - - text: Kong for Kubernetes - url: /deployment/k4k8s - - text: Kong for Kubernetes Enterprise - url: /deployment/k4k8s-enterprise - - text: Kong for Kubernetes with Kong Gateway Enterprise - url: /deployment/kong-enterprise - - text: Kong Ingress on AKS - url: /deployment/aks - - text: Kong Ingress on EKS - url: /deployment/eks - - text: Kong Ingress on GKE - url: /deployment/gke - - text: Admission Controller - url: /deployment/admission-webhook - - text: Installing Gateway APIs - url: /deployment/install-gateway-apis -- title: Guides - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - url: /guides/overview - items: - - text: Getting Started with KIC - url: /guides/getting-started - - text: Upgrading from previous versions - url: /guides/upgrade - - text: Getting Started using Istio - url: 
/guides/getting-started-istio - - text: Using Custom Resources - items: - - text: Using the Kong(Cluster)Plugin Resource - url: /guides/using-kongplugin-resource - - text: Using the KongIngress Resource - url: /guides/using-kongingress-resource - - text: Using KongConsumer and Credential Resources - url: /guides/using-consumer-credential-resource - - text: Using the TCPIngress Resource - url: /guides/using-tcpingress - - text: Using the UDPIngress Resource - url: /guides/using-udpingress - - text: Using the ACL and JWT Plugins - url: /guides/configure-acl-plugin - - text: Using cert-manager with Kong - url: /guides/cert-manager - - text: Configuring a Fallback Service - url: /guides/configuring-fallback-service - - text: Using an External Service - url: /guides/using-external-service - - text: Configuring HTTPS Redirects for Services - url: /guides/configuring-https-redirect - - text: Using Redis for Rate Limiting - url: /guides/redis-rate-limiting - - text: Integrate KIC with Prometheus/Grafana - url: /guides/prometheus-grafana - - text: Configuring Circuit-Breaker and Health-Checking - url: /guides/configuring-health-checks - - text: Setting up a Custom Plugin - url: /guides/setting-up-custom-plugins - - text: Using Ingress with gRPC - url: /guides/using-ingress-with-grpc - - text: Setting up Upstream mTLS - url: /guides/upstream-mtls - - text: Exposing a TCP-based Service - url: /guides/using-tcpingress - - text: Exposing a UDP-based Service - url: /guides/using-udpingress - - text: Using the mTLS Auth Plugin - url: /guides/using-mtls-auth-plugin - - text: Using the OpenID Connect Plugin - url: /guides/using-oidc-plugin - - text: Rewriting Hosts and Paths - url: /guides/using-rewrites - - text: Preserving Client IP Address - url: /guides/preserve-client-ip - - text: Using Gateway API - url: /guides/using-gateway-api - - text: Using Kong with Knative - url: /guides/using-kong-with-knative -- title: References - icon: /assets/images/icons/documentation/icn-kubernetes-color.svg - items: - - text: KIC Annotations - url: /references/annotations - - text: CLI Arguments - url: /references/cli-arguments - - text: Custom Resource Definitions - url: /references/custom-resources - - text: Plugin Compatibility - url: /references/plugin-compatibility - - text: Version Compatibility - url: /references/version-compatibility - - text: Troubleshooting - url: /troubleshooting - - text: Prometheus Metrics - url: /references/prometheus diff --git a/app/_data/kong_versions.yml b/app/_data/kong_versions.yml index 23e0f8f41539..997dc3b06544 100644 --- a/app/_data/kong_versions.yml +++ b/app/_data/kong_versions.yml @@ -245,33 +245,6 @@ label: unreleased - edition: "konnect" - edition: "contributing" -- release: "1.0.x" - version: "1.0.0" - edition: "kubernetes-ingress-controller" -- release: "1.1.x" - version: "1.1.1" - edition: "kubernetes-ingress-controller" -- release: "1.2.x" - version: "1.2.0" - edition: "kubernetes-ingress-controller" -- release: "1.3.x" - version: "1.3.4" - edition: "kubernetes-ingress-controller" -- release: "2.0.x" - version: "2.0.7" - edition: "kubernetes-ingress-controller" -- release: "2.1.x" - version: "2.1.1" - edition: "kubernetes-ingress-controller" -- release: "2.2.x" - version: "2.2.1" - edition: "kubernetes-ingress-controller" -- release: "2.3.x" - version: "2.3.1" - edition: "kubernetes-ingress-controller" -- release: "2.4.x" - version: "2.4.2" - edition: "kubernetes-ingress-controller" - release: "2.5.x" version: "2.5.0" edition: "kubernetes-ingress-controller" diff --git 
a/app/kubernetes-ingress-controller/1.0.x/concepts/custom-resources.md b/app/kubernetes-ingress-controller/1.0.x/concepts/custom-resources.md deleted file mode 100644 index 3ad4bee707c0..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/concepts/custom-resources.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Custom Resources ---- - -[Custom Resources][k8s-crd] in Kubernetes allow controllers -to extend Kubernetes-style -declarative APIs that are specific to certain applications. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -The {{site.kic_product_name}} uses the `configuration.konghq.com` API group -for storing configuration specific to Kong. - -The following CRDs allow users to declaratively configure all aspects of Kong: - -- [**KongIngress**](#kongingress) -- [**KongPlugin**](#kongplugin) -- [**KongClusterPlugin**](#kongclusterplugin) -- [**KongConsumer**](#kongconsumer) -- [**TCPIngress**](#tcpingress) -- [**KongCredential (Deprecated)**](#kongcredential-deprecated) - -## KongIngress - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, `KongIngress` Custom Resource is used as an -"extension" to the existing Ingress API to provide fine-grained control -over proxy behavior. -In other words, `KongIngress` works in conjunction with -the existing Ingress resource and extends it. -It is not meant as a replacement for the `Ingress` resource in Kubernetes. -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and [Route][kong-route] -entities in Kong related to an Ingress resource can be modified. - -Once a `KongIngress` resource is created, you can use the `configuration.konghq.com` -annotation to associate the `KongIngress` resource with an `Ingress` or a `Service` -resource: - -- When the annotation is added to the `Ingress` resource, the routing - configurations are updated, meaning all routes associated with the annotated - `Ingress` are updated to use the values defined in the `KongIngress`'s route - section. -- When the annotation is added to a `Service` resource in Kubernetes, - the corresponding `Service` and `Upstream` in Kong are updated to use the - `proxy` and `upstream` blocks as defined in the associated - `KongIngress` resource. - -The below diagram shows how the resources are linked -with one another: - -![Associating Kong Ingress](/assets/images/products/kubernetes-ingress-controller/kong-ingress-association.png "Associating Kong Ingress") - -## KongPlugin - -Kong is designed around an extensible [plugin][kong-plugin] -architecture and comes with a -wide variety of plugins already bundled inside it. -These plugins can be used to modify the request/response or impose restrictions -on the traffic. - -Once this resource is created, the resource needs to be associated with an -`Ingress`, `Service`, or `KongConsumer` resource in Kubernetes. -For more details, please read the reference documentation on `KongPlugin`. 
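For illustration, a minimal sketch of this association is shown below; the `rate-limiting` plugin, its configuration values, and the `echo` Service are placeholders rather than part of any particular deployment:

```yaml
apiVersion: configuration.konghq.com/v1
kind: KongPlugin
metadata:
  name: rate-limit-by-minute
config:
  minute: 5
  policy: local
plugin: rate-limiting
---
apiVersion: v1
kind: Service
metadata:
  name: echo
  annotations:
    konghq.com/plugins: rate-limit-by-minute
spec:
  selector:
    app: echo
  ports:
  - port: 80
    targetPort: 80
```

With the `konghq.com/plugins` annotation in place, the controller attaches the plugin configuration to the Kong service it generates for `echo`, so the limit applies to every route that forwards to that Service.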
- -The below diagram shows how you can link `KongPlugin` resource to an -`Ingress`, `Service`, or `KongConsumer`: - -| | | -:-:|:-: -![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association1.png)|![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association2.png) - -## KongClusterPlugin - -_This resource requires the [`kubernetes.io/ingress.class` annotation](/kubernetes-ingress-controller/{{page.release}}/references/annotations/)._ - -KongClusterPlugin resource is exactly same as KongPlugin, except that it is a -Kubernetes cluster-level resources instead of being a namespaced resource. -This can help when the configuration of the plugin needs to be centralized -and the permissions to add/update plugin configuration rests with a different -persona than application owners. - -This resource can be associated with `Ingress`, `Service` or `KongConsumer` -and can be used in the exact same way as KongPlugin. - -A namespaced KongPlugin resource takes priority over a -KongClusterPlugin with the same name. - -## KongConsumer - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This custom resource configures `Consumers` in Kong. -Every `KongConsumer` resource in Kubernetes directly translates to a -[Consumer][kong-consumer] object in Kong. - -## TCPIngress - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This Custom Resource is used for exposing non-HTTP -and non-GRPC services running inside Kubernetes to -the outside world via Kong. This proves to be useful when -you want to use a single cloud LoadBalancer for all kinds -of traffic into your Kubernetes cluster. - -It is very similar to the Ingress resource that ships with Kubernetes. - -## KongCredential (Deprecated) - -Once a `KongConsumer` resource is created, -credentials associated with the `Consumer` can be provisioned inside Kong -using KongCredential custom resource. - -This Custom Resource has been deprecated and will be removed in a future -release. -Instead, please use secret-based credentials. - -[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[kong-consumer]: /gateway/api/admin-ee/latest/#/Consumers/list-consumer/ -[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin -[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/ -[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/ -[kong-upstream]:/gateway/api/admin-ee/latest/#/Upstreams/list-upstream/ \ No newline at end of file diff --git a/app/kubernetes-ingress-controller/1.0.x/concepts/deployment.md b/app/kubernetes-ingress-controller/1.0.x/concepts/deployment.md deleted file mode 100644 index 3fee76eeebd9..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/concepts/deployment.md +++ /dev/null @@ -1,302 +0,0 @@ ---- -title: Kong Ingress Controller Deployment ---- - -The {{site.kic_product_name}} is designed to be deployed in a variety of ways -based on uses-cases. This document explains various components involved -and choices one can make as per the specific use-case. - -- [**Kubernetes Resources**](#kubernetes-resources): - Various Kubernetes resources required to run the {{site.kic_product_name}}. 
-- [**Deployment options**](#deployment-options): - A high-level explanation of choices that one should consider and customize - the deployment to best serve a specific use case. - -## Kubernetes Resources - -The following resources are used to run the {{site.kic_product_name}}: - -- [Namespace](#namespace) -- [Custom resources](#custom-resources) -- [RBAC permissions](#rbac-permissions) -- [Ingress Controller Deployment](#ingress-controller-deployment) -- [Kong Proxy service](#kong-proxy-service) -- [Database deployment and migrations](#database-deployment-and-migration) - -These resources are created if the reference deployment manifests are used -to deploy the {{site.kic_product_name}}. -The resources are explained below for users to gain an understanding of how -they are used, so that they can be tweaked as necessary for a specific use-case. - -### Namespace - -> optional - -The {{site.kic_product_name}} can be deployed in any [namespace][k8s-namespace]. -If {{site.kic_product_name}} is being used to proxy traffic for all namespaces -in a Kubernetes cluster, which is generally the case, -it is recommended that it is installed in a dedicated -`kong` namespace but it is not required to do so. - -The example deployments present in this repository automatically create a `kong` -namespace and deploy resources into that namespace. - -### Custom Resources - -> required - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, custom resources are used as an -"extension" to the existing Ingress API. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -Please refer to the [custom resources](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/) -concept document for details. - -### RBAC permissions - -> required - -The {{site.kic_product_name}} communicates with the Kubernetes API-server and -dynamically configures Kong to automatically load balance across pods -of a service as any service is scaled in our out. - -For this reason, it requires RBAC permissions to access resources stored -in Kubernetes object store. - -It needs read permissions (get,list,watch) -on the following Kubernetes resources: - -- Endpoints -- Nodes -- Pods -- Secrets -- Ingress -- KongPlugins -- KongConsumers -- KongCredentials -- KongIngress - -By default, the controller listens for events and above resources across -all namespaces and will need access to these resources at the cluster level -(using `ClusterRole` and `ClusterRoleBinding`). - -In addition to these, it needs: - -- Create a ConfigMap and read and update ConfigMap for to facilitate - leader-election. Please read this [document](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/) - for more details. -- Update permission on the Ingress resource to update the status of - the Ingress resource. - -If the Ingress Controller is listening for events on a single namespace, -these permissions can be updated to restrict these permissions to a specific -namespace using `Role` and `RoleBinding resources`. - -In addition to these, it is necessary to create a `ServiceAccount`, which -has the above permissions. The Ingress Controller Pod then has this -`ServiceAccount` association. 
This gives the Ingress Controller process -necessary authentication and authorization tokens to communicate with the -Kubernetes API-server. - -### Ingress Controller deployment - -> required - -Kong Ingress deployment consists of the Ingress Controller deployed alongside -Kong. The deployment will be different depending on if a database is being -used or not. - -The deployment(s) is the core which actually runs the {{site.kic_product_name}}. - -See the [database](#database) section below for details. - -### Kong Proxy service - -> required - -Once the {{site.kic_product_name}} is deployed, one service is needed to -expose Kong outside the Kubernetes cluster so that it can receive all traffic -that is destined for the cluster and route it appropriately. -`kong-proxy` is a Kubernetes service which points to the Kong pods which are -capable of proxying request traffic. This service will be usually of type -`LoadBalancer`, however it is not required to be such. -The IP address of this service should be used to configure DNS records -of all the domains that Kong should be proxying, to route the traffic to Kong. - -### Database deployment and migration - -> optional - -The {{site.kic_product_name}} can run with or without a database. -If a database is being deployed, then following resources are required: - -- A `StatefulSet` which runs a PostgreSQL pod backed with a `PersistenceVolume` - to store Kong's configuration. -- An internal `Service` which resolves to the PostgreSQL pod. This ensures - that Kong can find the PostgreSQL instance using DNS inside - the Kubernetes cluster. -- A batch `Job` to run schema migrations. This is required to be executed once - to install bootstrap Kong's database schema. - Please note that on an any upgrade for Kong version, another `Job` will - need to be created if the newer version contains any migrations. - -To figure out if you should be using a database or not, please refer to the -[database](#database) section below. - -## Deployment options - -Following are the difference options to consider while deploying the -{{site.kic_product_name}} for your specific use case: - -- [**Kubernetes Service Type**](#kubernetes-service-types): - Chose between Load Balancer vs Node-Port -- [**Database**](#database): - Backing Kong with a Database or running without a database -- [**Multiple Ingress Controllers**](#multiple-ingress-controllers): - Running multiple {{site.kic_product_name}}s inside the same Kubernetes cluster -- [**Runtime**](#runtime): - Using Kong or {{site.ee_product_name}} (for {{site.ee_product_name}} customers) - -### Kubernetes Service Types - -Once deployed, any Ingress Controller needs to be exposed outside the -Kubernetes cluster to start accepting external traffic. -In Kubernetes, `Service` abstraction is used to expose any application -to the rest of the cluster or outside the cluster. - -If your Kubernetes cluster is running in a cloud environment, where -Load Balancers can be provisioned with relative ease, it is recommended -that you use a Service of type `LoadBalancer` to expose Kong to the outside -world. For the Ingress Controller to function correctly, it is also required -that a L4 (or TCP) Load Balancer is used and not an L7 (HTTP(s)) one. - -If your Kubernetes cluster doesn't support a service of type `LoadBalancer`, -then it is possible to use a service of type `NodePort`. - -### Database - -Until Kong 1.0, a database was required to run Kong. 
-Kong 1.1 introduced a new mode, DB-less, in which Kong can be configured -using a config file, and removes the need to use a database. - -It is possible to deploy and run the {{site.kic_product_name}} with or without a -database. The choice depends on the specific use-case and results in no -loss of functionality. - -#### Without a database - -In DB-less deployments, Kong's Kubernetes ingress controller runs -alongside Kong and configures Kong and dynamically configures -Kong as per the changes it receives from the Kubernetes API server. - -Following figure shows how this deployment looks like: - -![Kong DB-less](/assets/images/products/kubernetes-ingress-controller/dbless-deployment.png "Kong DB-less architecture") - -In this deployment, only one Deployment is required, which is comprised of -a Pod with two containers, a Kong container which proxies the requests -and a controller container which configures Kong. - -`kong-proxy` service would point to the ports of the Kong container in the -above deployment. - -Since each pod contains a controller and a Kong container, scaling out -simply requires horizontally scaling this deployment to handle more traffic -or to add redundancy in the infrastructure. - -#### With a Database - -In a deployment where Kong is backed by a DB, the deployment architecture -is a little different. - -Please refer to the below figure: - -![Kong with a Database](/assets/images/products/kubernetes-ingress-controller/db-deployment.png "Kong with database") - -In this type of deployment, there are two types of deployments created, -separating the control and data flow: - -- **Control-plane**: This deployment consists of a pod(s) running - the controller alongside - a Kong container, which can only configure the database. This deployment - does not proxy any traffic but only configures Kong. If multiple - replicas of this pod are running, a leader election process will ensure - that only one of the pods is configuring Kong's database at a time. -- **Data-plane**: This deployment consists of pods running a - single Kong container which can proxy traffic based on the configuration - it loads from the database. This deployment should be scaled to respond - to change in traffic profiles and add redundancy to safeguard from node - failures. -- **Database**: The database is used to store Kong's configuration and propagate - changes to all the Kong pods in the cluster. All Kong containers, in the - cluster should be able to connect to this database. - -A database driven deployment should be used if your use-case requires -dynamic creation of Consumers and/or credentials in Kong at a scale large -enough that the consumers will not fit entirely in memory. - -## Multiple Ingress Controllers - -It is possible to run multiple instances of the {{site.kic_product_name}} or -run a Kong {{site.kic_product_name}} alongside other Ingress Controllers inside -the same Kubernetes cluster. - -There are a few different ways of accomplishing this: - -- Using `kubernetes.io/ingress.class` annotation: - It is common to deploy Ingress Controllers on a cluster level, meaning - an Ingress Controller will satisfy Ingress rules created in all the namespaces - inside a Kubernetes cluster. - Use the annotation on Ingress and Custom resources to segment - the Ingress resources between multiple Ingress Controllers. - **Warning!** - When you use another Ingress Controller, which is default for cluster - (without set any `kubernetes.io/ingress.class`), be aware of using default `kong` - ingress class. 
There is special behavior of the default `kong` ingress class, - where any ingress resource that is not annotated is picked up. - Therefore with different ingress class then `kong`, you have to use that - ingress class with every Kong CRD object (plugin, consumer) which you use. -- Namespace based isolation: - {{site.kic_product_name}} supports a deployment option where it will satisfy - Ingress resources in a specific namespace. With this model, one can deploy - a controller in multiple namespaces and they will run in an isolated manner. -- If you are using {{site.ee_product_name}}, you can run multiple Ingress Controllers - pointing to the same database and configuring different Workspaces inside - {{site.ee_product_name}}. With such a deployment, one can use either of the above - two approaches to segment Ingress resources into different Workspaces in - {{site.ee_product_name}}. - -## Runtime - -The {{site.kic_product_name}} is compatible a variety of runtimes: - -### {{site.base_gateway}} (OSS) - -This is the [Open-Source Gateway](https://github.com/kong/kong) runtime. -The Ingress Controller is primarily developed against releases of the -open-source gateway. - -### {{site.ee_product_name}} K8S - -If you are a {{site.ee_product_name}} customer, you have access to two more runtimes. - -The first one, {{site.ee_product_name}} K8S, is an package that takes the Open-Source -{{site.base_gateway}} and adds enterprise-only plugins to it. - -You simply need to deploy {{site.ee_product_name}} K8S instead of the Open-Source -Gateway in-order to take full-advantage of enterprise plugins. - -### {{site.ee_product_name}} - -The {{site.kic_product_name}} is also compatible with the full-blown version of -{{site.ee_product_name}}. This runtime ships with Kong Manager, Kong Portal, and a -number of other enterprise-only features. -[This doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise) provides a high-level -overview of the architecture. - -[k8s-namespace]: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ diff --git a/app/kubernetes-ingress-controller/1.0.x/concepts/design.md b/app/kubernetes-ingress-controller/1.0.x/concepts/design.md deleted file mode 100644 index 62402fec8f0b..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/concepts/design.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Kong Ingress Controller Design ---- - -## Overview - -The {{site.kic_product_name}} configures Kong -using Ingress resources created inside a Kubernetes cluster. - -The {{site.kic_product_name}} is made up of two components: - -- Kong, the core proxy that handles all the traffic -- Controller, a process that syncs the configuration from Kubernetes to Kong - -The {{site.kic_product_name}} performs more than just proxying the traffic coming -into a Kubernetes cluster. It is possible to configure plugins, -load balancing, health checking and leverage all that Kong offers in a -standalone installation. - -The following figure shows how it works: - -![high-level-design](/assets/images/products/kubernetes-ingress-controller/high-level-design.png "High Level Design") - -The Controller listens for changes happening inside the Kubernetes -cluster and updates Kong in response to those changes to correctly -proxy all the traffic. - -Kong is updated dynamically to respond to changes around scaling, -configuration changes, failures that are happening inside a Kubernetes -cluster. 
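To make this concrete, the sketch below shows the kind of Ingress the controller watches for; the hostname, path, and `echo` backend Service are illustrative placeholders. The Translation section that follows describes how such a resource is mapped onto Kong entities.

```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: echo-ingress
  annotations:
    kubernetes.io/ingress.class: "kong"
spec:
  rules:
  - host: example.com
    http:
      paths:
      - path: /echo
        backend:
          serviceName: echo
          servicePort: 80
```

Creating, updating, or deleting this resource is all that is required; the controller picks up the change from the Kubernetes API server and reconfigures Kong without any manual Admin API calls.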
- -## Translation - -Kubernetes resources are mapped to Kong resources to correctly -proxy all the traffic. - -The following figure describes the mapping between Kubernetes concepts -to Kong's configuration: - -![translating k8s to kong](/assets/images/products/kubernetes-ingress-controller/k8s-to-kong.png "Translating k8s resources to Kong") - -Let's go through how Kubernetes resources are being mapped to Kong's -configuration: - -- An [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) - resource in Kubernetes defines a set of rules for proxying - traffic. These rules corresponds to the concept of Route in Kong. -- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/) - inside Kubernetes is a way to abstract an application that is - running on a set of pods. - This maps to two objects in Kong: Service and Upstream. - The service object in Kong holds the information on the protocol - to use to talk to the upstream service and various other protocol - specific settings. The Upstream object defines load balancing - and healthchecking behavior. -- Pods associated with a Service in Kubernetes map as a Target belonging - to the Upstream (the upstream corresponding to the Kubernetes - Service) in Kong. Kong load balances across the Pods of your service. - This means that all requests flowing through Kong are not directed via - kube-proxy but directly to the pod. - -For more information on how Kong works with Routes, Services, and Upstreams, -please see the [Proxy](/gateway/latest/reference/proxy/) -and [Load balancing](/gateway/latest/reference/loadbalancing/) references. diff --git a/app/kubernetes-ingress-controller/1.0.x/concepts/ha-and-scaling.md b/app/kubernetes-ingress-controller/1.0.x/concepts/ha-and-scaling.md deleted file mode 100644 index 86b6e710c9f4..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/concepts/ha-and-scaling.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: High-availability and Scaling ---- - -## High availability - -The {{site.kic_product_name}} is designed to be reasonably easy to operate and -be highly available, meaning, when some expected failures do occur, the -Controller should be able to continue to function with minimum possible -service disruption. - -The {{site.kic_product_name}} is composed of two parts: 1. Kong, which handles -the requests, 2. Controller, which configures Kong dynamically. - -Kong itself can be deployed in a Highly available manner by deploying -multiple instances (or pods). Kong nodes are state-less, meaning a Kong pod -can be terminated and restarted at any point of time. - -The controller itself can be stateful or stateless, depending on if a database -is being used or not. - -If a database is not used, then the Controller and Kong are deployed as -colocated containers in the same pod and each controller configures the Kong -container that it is running with. - -For cases when a database is necessary, the Controllers can be deployed -on multiple zones to provide redudancy. In such a case, a leader election -process will elect one instance as a leader, which will manipulate Kong's -configuration. - -### Leader election - -The {{site.kic_product_name}} performs a leader-election when multiple -instances of the controller are running to ensure that only a single Controller -is actively pushing changes to Kong's database (when running in DB-mode). -If multiple controllers are making changes to the database, it is possible that -the controllers step over each other. 
-If an instance of the controller fails, any other container which is a follower, -takes up the leadership and then continues syncing Kong's configuration from -Kubernetes. - -For this reason, the Controller needs permission to create a ConfigMap. -By default, the permission is given at Cluster level but it can be narrowed -down to a single namespace (using Role and RoleBinding) for a stricter RBAC -policy. - -It also needs permission to read and update this ConfigMap. -This permission can be specific to the ConfigMap that is being used -for leader-election purposes. -The name of the ConfigMap is derived from the value of election-id CLI flag -(default: `ingress-controller-leader`) and -ingress-class (default: `kong`) as: "-". -For example, the default ConfigMap that is used for leader election will -be "ingress-controller-leader-kong", and it will be present in the same -namespace that the controller is deployed in. - -## Scaling - -Kong is designed to be horizontally scalable, meaning as traffic increases, -multiple instances of Kong can be deployed to handle the increase in load. - -The configuration is either pumped into Kong directly via the Ingress -Controller or loaded via the database. Kong containers can be considered -stateless as the configuration is either loaded from the database (and -cached heavily in-memory) or loaded in-memory directly via a config file. - -One can use a `HorizontalPodAutoscaler` (HPA) based on metrics -like CPU utilization, bandwidth being used, total request count per second -to dynamically scale {{site.kic_product_name}} as the traffic profile changes. diff --git a/app/kubernetes-ingress-controller/1.0.x/concepts/ingress-classes.md b/app/kubernetes-ingress-controller/1.0.x/concepts/ingress-classes.md deleted file mode 100644 index 85790f0dbd66..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/concepts/ingress-classes.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Kong Ingress Controller and Ingress Class ---- - -## Introduction - -The {{site.kic_product_name}} uses ingress classes to filter Kubernetes Ingress -objects and other resources before converting them into Kong configuration. -This allows it to coexist with other ingress controllers and/or other -deployments of the {{site.kic_product_name}} in the same cluster: a -{{site.kic_product_name}} will only process configuration marked for its use. - -## Configuring the controller ingress class - -The `--ingress-class` flag (or `CONTROLLER_INGRESS_CLASS` environment variable) -specify the ingress class expected by the {{site.kic_product_name}}. By default, -it expects the `kong` class. - -## Loading resources by class - -The {{site.kic_product_name}} translates a variety of Kubernetes resources into -Kong configuration. Broadly speaking, we can separate these resources into two -categories: - -- Resources that the controller translates directly into Kong configuration. -- Resources referenced by some other resource, where the other resource is - directly translated into Kong configuration. - -For example, an Ingress is translated directly into a Kong route, and a -KongConsumer is translated directly into a -[Kong consumer](/gateway/api/admin-ee/latest/#/Consumers/list-consumer/). A Secret containing -an authentication plugin credential is _not_ translated directly: it is only -translated into Kong configuration if a KongConsumer resource references it. 
- -Because they create Kong configuration independent of any other resources, -directly-translated resources require an ingress class, and their class must -match the class configured for the controller. Referenced resources do not -require a class, but must be referenced by a directly-translated resource -that matches the controller. - -### Adding class information to resources - -Most resources use a [kubernetes.io/ingress-class annotation][class-annotation] -to indicate their class. There are several exceptions: - -- v1 Ingress resources have a dedicated `class` field. -- Knative Services [use the class specified][knative-class] by the - `ingress.class` key of the Knative installation's `config-network` ConfigMap. - You can optionally [override this on a per-Service basis][knative-override] - by adding a `networking.knative.dev/ingress.class` annotation to the Service. - -### Enabling support for classless resources - -Specifying a class is optional for some resources. Although specifying a class -is recommended, you can instruct the controller to process resources without a -class annotation using flags: - -- `--process-classless-ingress-v1beta1` instructs the controller to translate - v1beta1 Ingress resources with no class annotation. -- `--process-classless-kong-consumer` instructs the controller to translate - KongConsumer resources with no class annotation. - -These flags are primarily intended for compatibility with older configuration -({{site.kic_product_name}} before 0.10 had less strict class -requirements, and it was common to omit class annotations). If you are creating -new configuration and do not have older configuration without class -annotations, recommended best practice is to add class information to Ingress -and KongConsumer resources and not set the above flags. Doing so avoids -accidentally creating duplicate configuration in other ingress controller -instances. - -These flags do not _ignore_ `ingress.class` annotations: they allow resources -with no such annotation, but will not allow resource that have a non-matching -`ingress.class` annotation. - -## When to use a custom class - -Using the default `kong` class is fine for simpler deployments, where only one -{{site.kic_product_name}} instance is running in a cluster. Changing the class is -typical when: - -- You install multiple Kong environments in one Kubernetes cluster to handle - different types of ingress traffic, e.g. when using separate Kong instances - to handle traffic on internal and external load balancers, or deploying - different types of non-production environments in a single test cluster. -- You install multiple controller instances alongside a single Kong cluster to - separate configuration into different Kong workspaces (using the - `--kong-workspace` flag) or to restrict which Kubernetes namespaces any one - controller instance has access to. - -## Legacy behavior - -This overview covers behavior in {{site.kic_product_name}} version 0.10.0 onward. -Earlier versions had a special case for the default class and a bug affecting -custom classes: - -- When using the default `kong` class, the controller would always process - classless resources in addition to `kong`-class resources. When using a - non-default controller class, the controller would only process resources - with that class, not classless resources. Although this was by design, it was - a source of user confusion. 
-- When using a custom controller class, some resources that should not have - required a class (because they were referenced by other resources) - effectively did require a class: while these resources were loaded initially, - the controller would not track updates to them unless they had a class - annotation. - -In versions 0.10.0+ you must instruct the controller to load classless -resources, which is allowed (but not recommended) for either the default or -custom classes. Resources referenced by another resource are always loaded and -updated correctly regardless of which class you set on the controller; you do -not need to add class annotations to these resources when using a custom class. - -## Examples - -Typical configurations will include a mix of resources that have class -information and resources that are referenced by them. For example, consider -the following configuration for authenticating a request, using a KongConsumer, -credential Secret, Ingress, and KongPlugin (a Service is implied, but not -shown): - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: dyadya-styopa - annotations: - kubernetes.io/ingress.class: "kong" -username: styopa -credentials: -- styopa-key - ---- - -kind: Secret -apiVersion: v1 -stringData: - key: bylkogdatomoryakom - kongCredType: key-auth -metadata: - name: styopa-key - ---- - -kind: Ingress -apiVersion: extensions/v1beta1 -metadata: - name: ktonezhnaet - annotations: - kubernetes.io/ingress.class: "kong" - konghq.com/plugins: "key-auth-example" -spec: - rules: - - http: - paths: - - path: /vsemznakom - backend: - serviceName: httpbin - servicePort: 80 - ---- - -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: key-auth-example -plugin: key-auth -``` - -The KongConsumer and Ingress resources both have class annotations, as they are -resources that the controller uses as a basis for building Kong configuration. -The Secret and KongPlugin _do not_ have class annotations, as they are -referenced by other resources that do. - -[class-annotation]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#kubernetesioingressclass -[knative-class]: /kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/#ingress-class -[knative-override]: https://knative.dev/docs/serving/services/ingress-class/ diff --git a/app/kubernetes-ingress-controller/1.0.x/concepts/ingress-versions.md b/app/kubernetes-ingress-controller/1.0.x/concepts/ingress-versions.md deleted file mode 100644 index 86cf67d9ed4b..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/concepts/ingress-versions.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: Ingress v1 and v1beta1 Differences ---- - -## Introduction - -Kubernetes 1.19 introduced a new `networking.k8s.io/v1` API for the [Ingress resource][kubernetes-ingress-doc]. -It standardizes common practices and clarifies implementation requirements that -were previously up to individual controller vendors. This document covers those -changes as they relate to {{site.kic_product_name}} and provides sample -equivalent `networking.k8s.io/v1beta1` and `networking.k8s.io/v1` resources for comparison. - -## Paths - -Both Ingress v1beta1 and v1 HTTP rules require a path, which represents a [URI -path][uri-rfc-paths]. Although v1beta1 had specified that paths were [POSIX -regular expressions][posix-regex] and enforced this, in practice most -controllers used other implementations that did not match the -specification. 
v1 seeks to reduce confusion by introducing several [path -types][path-types] and lifting restrictions on regular expression grammars used -by controllers. - -### networking.k8s.io/v1beta1 - -The controller passes paths directly to Kong and relies on its [path handling -logic][kong-paths]. The Kong proxy treats paths as a prefix unless they include -characters [not allowed in RFC 3986 paths][uri-rfc-paths], in which case the -proxy assumes they are a regular expression, and does not treat slashes as -special characters. For example, the prefix `/foo` can match any of the -following: - -``` -/foo -/foo/ -/foobar -/foo/bar -``` - -### networking.k8s.io/v1 - -Although v1 Ingresses provide path types with more clearly-defined logic, the -controller must still create Kong routes and work within the Kong proxy's -routing logic. As such, the controller translates Ingress rule paths to create -Kong routes that match one of the following specifications: `Exact`, `Prefix`, or `ImplementationSpecific`. - -#### Exact - -If `pathType` is `Exact`, the controller creates a Kong route with a regular -expression that matches the rule path only. For example, an exact rule for `/foo` in an -Ingress translates to a Kong route with a `/foo$` regular expression path. - -#### Prefix - -If `pathType` is `Prefix`, the controller creates a Kong route with two path -criteria. For example, `/foo` will create a route with a `/foo$` regular expression and -`/foo/` plain path. - -#### ImplementationSpecific - -The controller leaves `ImplementationSpecific` path rules entirely up to the Kong -router. It creates a route with the exact same path string as the Ingress rule. - -{:.important} -> Both `Prefix` and `Exact` paths modify the paths you -provide, and those modifications may interfere with user-provided regular -expressions. If you are using your own regular expressions in paths, use -`ImplementationSpecific` to avoid unexpected behavior. - - -## Ingress class - -[Ingress class][ingress-class] indicates which resources an ingress controller -should process. It provides a means to separate out configuration intended for -other controllers or other instances of the {{site.kic_product_name}}. - -In v1beta1, ingress class was handled informally using -`kubernetes.io/ingress.class` [annotations][deprecated-annotation]. v1 -introduces a new [IngressClass resource][ingress-class-api] which provides -richer information about the controller. v1 Ingresses are bound to a class via -their `ingressClassName` field. - -For example, consider this v1beta1 Ingress: - -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: example-ingress - annotations: - kubernetes.io/ingress.class: "kong" -spec: - rules: - - host: example.com - http: - paths: - - path: /test - backend: - serviceName: echo - servicePort: 80 -``` - -Its ingress class annotation is set to `kong`, and ingress controllers set to -process `kong` class Ingresses will process it. - -In v1, the equivalent configuration declares a `kong` IngressClass resource -whose `metadata.name` field indicates the class name. 
The `ingressClassName` -value of the Ingress object must match the value of the `name` field in the -IngressClass metadata: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - name: kong -spec: - controller: ingress-controllers.konghq.com/kong ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: example-ingress -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /testpath - pathType: Prefix - backend: - service: - name: test - port: - number: 80 -``` - -## Hostnames - -Ingress v1 formally codifies support for [wildcard hostnames][wildcard-hostnames]. -v1beta1 Ingresses did not reject wildcard hostnames, however, and Kong had -[existing support for them][kong-wildcard-hostnames]. - -As such, while the v1beta1 specification did not officially support wildcard -hostnames, you can use wildcard hostnames with either version. Setting a -hostname like `*.example.com` will match requests for both `foo.example.com` -and `bar.example.com` with either v1 or v1beta1 Ingresses. - -## Backend types - -Ingress v1 introduces support for backends other than Kubernetes Services through -[resource backends][resource-backends]. - -Kong does not support any dedicated resource backend configurations, though it -does have support for Routes without Services in some cases (for example, when -using the [AWS Lambda plugin][lambda-plugin]). For these routes, you should -create a placeholder Kubernetes Service for them, using an [ExternalName -Service][external-name] with an RFC 2606 invalid hostname, e.g. -`kong.invalid`. You can use these placeholder services with either v1 or -v1beta1 Ingresses. - -[kubernetes-ingress-doc]: https://kubernetes.io/docs/concepts/services-networking/ingress/ -[ingress-class]: /kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes -[uri-rfc-paths]: https://tools.ietf.org/html/rfc3986#section-3.3 -[posix-regex]: https://www.boost.org/doc/libs/1_38_0/libs/regex/doc/html/boost_regex/syntax/basic_extended.html -[path-types]: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types -[kong-paths]: /gateway/latest/reference/proxy/#request-path -[wildcard-hostnames]: https://kubernetes.io/docs/concepts/services-networking/ingress/#hostname-wildcards -[kong-wildcard-hostnames]: /gateway/latest/reference/proxy/#using-wildcard-hostnames -[resource-backends]: https://kubernetes.io/docs/concepts/services-networking/ingress/#resource-backend -[lambda-plugin]: /hub/kong-inc/aws-lambda/ -[external-name]: https://kubernetes.io/docs/concepts/services-networking/service/#externalname -[deprecated-annotation]: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation -[ingress-class-api]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-class-v1/ diff --git a/app/kubernetes-ingress-controller/1.0.x/concepts/k4k8s-with-kong-enterprise.md b/app/kubernetes-ingress-controller/1.0.x/concepts/k4k8s-with-kong-enterprise.md deleted file mode 100644 index 4331901f398f..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/concepts/k4k8s-with-kong-enterprise.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -Kong for Kubernetes is a {{site.kic_product_name}} built on top -of Open-Source {{site.base_gateway}}. 
- -If you are an Enterprise customer, you have an option of running the -[Enterprise version](/gateway/latest/install-and-run/kubernetes/) -of the Ingress Controller, which includes -all the Enterprise plugins but does not include Kong Manager or any -other Enterprise features. This makes it possible to -run the Ingress layer without a database, providing a very low -operational and maintenance footprint. - -However, in some cases, those enterprise features are necessary, -and for such use-cases we support another deployment - Kong for -Kubernetes with {{site.ee_product_name}}. - -As seen in the diagram below, this deployment consists of -Kong for Kubernetes deployed in Kubernetes, and is hooked up with -a database. If there are services running outside Kubernetes, -a regular {{site.base_gateway}} proxy can be deployed there and connected to the -same database. This provides a single pane of visibility of -all services that are running in your infrastructure. - -![architecture-overview](/assets/images/products/kubernetes-ingress-controller/k4k8s-with-kong-enterprise.png "K4K8S with {{site.ee_product_name}}") - -In this deployment model, the database for Kong can be hosted anywhere. -It can be a managed DBaaS service like Amazon RDS, Google Cloud -SQL or a Postgres instance managed in-house or even an instance -deployed on Kubernetes. -If you are following this model, please keep in mind the following: -- It is recommended to not deploy Postgres on Kubernetes, - due to the fact that running stateful applications on Kubernetes - is challenging to get right. -- Ensure that you have the same image/package of {{site.ee_product_name}} - running across the fleet. This means that all Kong instances that are - connected to the same database must use the - same version of {{site.ee_product_name}} package. - -[This guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise) -walks through the setup of the above architecture. diff --git a/app/kubernetes-ingress-controller/1.0.x/concepts/security.md b/app/kubernetes-ingress-controller/1.0.x/concepts/security.md deleted file mode 100644 index bbb08cd9073b..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/concepts/security.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Security ---- - -This document explains the security aspects of the {{site.kic_product_name}}. - -The {{site.kic_product_name}} communicates with Kubernetes API-server and Kong's -Admin API. APIs on both sides offer authentication/authorization features -and the controller integrates with them gracefully. - -## Kubernetes RBAC - -The {{site.kic_product_name}} is deployed with RBAC permissions as explained in the -[deployment](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment) document. -It has read and list permissions on most resources but requires update -and create permission for a few resources to provide seamless integration. -The permissions can be locked down further if needed depending on the specific -use-case. -This RBAC policy is associated with a ServiceAccount and the ServiceAccount -is associated with the {{site.kic_product_name}}. -The Controller uses the ServiceAccount credential to authenticate and -authorize itself against the Kubernetes API-server. - -## Kong Admin API Protection - -Kong's Admin API is used to control configuration of Kong and proxying behavior. 
-If an attacker happens to gain access to Kong's Admin API, they -will be able to perform all actions as an authorized user like -modifying or deleting Kong's configuration. -Hence, it is important that the deployment -ensures that the likelihood of this happening is as small as possible. - -In the example deployments, the Controller and Kong's Admin API communicate -over the loopback (`lo`) interface of the pod. -Kong is not performing any kind of authorization or -authentication on the Admin API, hence the API is accessible only -on the loopback interface to limit the attack surface. -Although not ideal, this setup requires fewer steps -to get started and can be further hardened as required. - -Please note that it is very important that Kong's Admin API is not accessible -inside the cluster as any malicious service can change Kong's configuration. -If you're exposing Kong's Admin API itself outside the cluster, please ensure -that you have the necessary authentication in place first. - -### Authentication on Kong's Admin API - -If Kong's Admin API is protected with one of the authentication plugins, -the Controller can authenticate itself against it to add another layer of -security. -The Controller comes with support for injecting arbitrary HTTP headers -in the requests it makes to Kong's Admin API, which can be used to inject -authentication credentials. -The headers can be specified using the CLI flag `--kong-admin-header` in the Ingress -Controller. - -The Ingress Controller will support mutual-TLS-based authentication on Kong's Admin -API in future. - -### {{site.ee_product_name}} RBAC - -{{site.ee_product_name}} comes with support for authentication and authorization on -Kong's Admin API. - -Once an RBAC token is provisioned, the {{site.kic_product_name}} can use the RBAC -token to authenticate against {{site.ee_product_name}}. Use the `--kong-admin-header` CLI -flag to pass the RBAC token the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/admission-webhook.md b/app/kubernetes-ingress-controller/1.0.x/deployment/admission-webhook.md deleted file mode 100644 index 23ee8d5da9fb..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/admission-webhook.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Validating Admission Controller ---- - -The {{site.kic_product_name}} ships with an Admission Controller for KongPlugin -and KongConsumer resources in the `configuration.konghq.com` API group. - -The Admission Controller needs a TLS certificate and key pair which -you need to generate as part of the deployment. - -Following guide walks through a setup of how to create the required key-pair -and enable the admission controller. - -Please note that this requires {{site.kic_product_name}} >= 0.6 to be -already installed in the cluster. - -## tldr; - -If you are using the stock YAML manifests to install and setup Kong for -Kubernetes, then you can setup the admission webhook using a single command: - -```bash -curl -sL https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/hack/deploy-admission-controller.sh | bash - -``` - -This script takes all the following commands and packs them together. -You need `kubectl` and `openssl` installed on your workstation for this to -work. - -## Create a certificate for the admission controller - -Kuberentes API-server makes an HTTPS call to the Admission Controller to verify -if the custom resource is valid or not. 
For this to work, Kubernetes API-server -needs to trust the CA certificate that is used to sign Admission Controller's -TLS certificate. - -This can be accomplished either using a self-signed certificate or using -Kubernetes CA. Follow one of the steps below and then go to -[Create the secret](#create-the-secret) step below. - -Please note the `CN` field of the x509 certificate takes the form -`..svc`, which -in the default case is `kong-validation-webhook.kong.svc`. - -### Using self-signed certificate - -Use openssl to generate a self-signed certificate: - -```bash -$ openssl req -x509 -newkey rsa:2048 -keyout tls.key -out tls.crt -days 365 \ - -nodes -subj "/CN=kong-validation-webhook.kong.svc" \ - -extensions EXT -config <( \ - printf "[dn]\nCN=kong-validation-webhook.kong.svc\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:kong-validation-webhook.kong.svc\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") -Generating a 2048 bit RSA private key -..........................................................+++ -.............+++ -writing new private key to 'key.pem' -``` - -### Using in-built Kubernetes CA - -Kubernetes comes with an in-built CA which can be used to provision -a certificate for the Admission Controller. -Please refer to the -[this guide](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/) -on how to generate a certificate using the in-built CA. - -### Create the secret - -Next, create a Kubernetes secret object based on the key and certificate that -was generatd in the previous steps. -Here, we assume that the PEM-encoded certificate is stored in a file named -`tls.crt` and private key is stored in `tls.key`. - -```bash -$ kubectl create secret tls kong-validation-webhook -n kong \ - --key tls.key --cert tls.crt -secret/kong-validation-webhook created -``` - -## Update the deployment - -Once the secret is created, update the Ingress Controller deployment: - -Execute the following command to patch the {{site.kic_product_name}} deployment -to mount the certificate and key pair and also enable the admission controller: - -```bash -$ kubectl patch deploy -n kong ingress-kong \ - -p '{"spec":{"template":{"spec":{"containers":[{"name":"ingress-controller","env":[{"name":"CONTROLLER_ADMISSION_WEBHOOK_LISTEN","value":":8080"}],"volumeMounts":[{"name":"validation-webhook","mountPath":"/admission-webhook"}]}],"volumes":[{"secret":{"secretName":"kong-validation-webhook"},"name":"validation-webhook"}]}}}}' -deployment.extensions/ingress-kong patched -``` - -## Enable the validating admission - -If you are using Kubernetes CA to generate the certificate, you don't need -to supply a CA certificate (in the `caBunde` param) -as part of the Validation Webhook configuration -as the API-server already trusts the internal CA. 
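-
-If you generated a self-signed certificate in the earlier step, you do need to
-supply the CA bundle. The manifest below inlines it with `cat tls.crt | base64 -w 0`;
-if you prefer to compute the value separately first (a convenience only, assuming
-the certificate is still in `tls.crt`), you can run:
-
-```bash
-# Base64-encode the self-signed certificate without line wrapping so it can
-# be pasted into the caBundle field of the webhook configuration.
-CA_BUNDLE=$(base64 < tls.crt | tr -d '\n')
-echo "$CA_BUNDLE"
-```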
- -```bash -$ echo "apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: kong-validations -webhooks: -- name: validations.kong.konghq.com - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: ["v1beta1"] - rules: - - apiGroups: - - configuration.konghq.com - apiVersions: - - '*' - operations: - - CREATE - - UPDATE - resources: - - kongconsumers - - kongplugins - - apiGroups: - - '' - apiVersions: - - 'v1' - operations: - - CREATE - - UPDATE - resources: - - secrets - clientConfig: - service: - namespace: kong - name: kong-validation-webhook - caBundle: $(cat tls.crt | base64 -w 0) " | kubectl apply -f - -``` - -## Verify if it works - -### Verify duplicate KongConsumers - -Create a KongConsumer with username as `harry`: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, create another KongConsumer with the same username: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry2 - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: consumer already exists -``` - -The validation webhook rejected the KongConsumer resource as there already -exists a consumer in Kong with the same username. - -### Verify incorrect KongPlugins - -Try to create the folowing KongPlugin resource. -The `foo` config property does not exist in the configuration definition and -hence the Admission Controller returns back an error. -If you remove the `foo: bar` configuration line, the plugin will be -created succesfully. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - foo: bar - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: 400 Bad Request {"fields":{"config":{"foo":"unknown field"}},"name":"schema violation","code":2,"message":"schema violation (config.foo: unknown field)"} -``` - -### Verify incorrect credential secrets - -With 0.7 and above versions of the controller, validations also take place -for incorrect secret types and wrong parameters to the secrets: - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=basic-auth \ - --from-literal=username=foo -Error from server: admission webhook "validations.kong.konghq.com" denied the request: missing required field(s): password -``` - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=wrong-auth \ - --from-literal=sdfkey=my-sooper-secret-key -Error from server: admission webhook "validations.kong.konghq.com" denied the request: invalid credential type: wrong-auth -``` diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/aks.md b/app/kubernetes-ingress-controller/1.0.x/deployment/aks.md deleted file mode 100644 index 10cffc880e87..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/aks.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: Kong Ingress on Azure Kubernetes Service (AKS) ---- - -## Requirements - -1. A fully functional AKS cluster. 
- Please follow Azure's Guide to - [set up an AKS cluster](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the AKS Kubernetes - cluster you'll work on. The above AKS setup guide will help - you set this up. - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It will take a few minutes for all containers to start and report -healthy status. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Set up environment variables - -Next, set up an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's set up an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Microsoft Azure to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/eks.md b/app/kubernetes-ingress-controller/1.0.x/deployment/eks.md deleted file mode 100644 index 78a47f8880c2..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/eks.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Kong Ingress on Elastic Kubernetes Service (EKS) ---- - -## Requirements - -1. A fully functional EKS cluster. - Please follow Amazon's Guide to - [set up an EKS cluster](https://aws.amazon.com/getting-started/projects/deploy-kubernetes-app-amazon-eks/). -2. Basic understanding of Kubernetes -3. A working `kubectl` linked to the EKS Kubernetes - cluster we will work on. The above EKS setup guide will help - you set this up. 
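-
-Before deploying anything, it can help to confirm that `kubectl` is pointed at
-the intended EKS cluster (a quick sanity check only; the commands below are
-standard `kubectl` and not EKS-specific):
-
-```bash
-# Show the active kubectl context and verify that the worker nodes respond.
-$ kubectl config current-context
-$ kubectl get nodes -o wide
-```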
- -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It may take a few minutes for all containers to start and report -healthy statuses. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, create an environment variable with the IP address at which -Kong is accessible. This IP address sends requests to the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 example.eu-west-1.elb.amazonaws.com 80:31929/TCP,443:31408/TCP 57d -``` - -Create an environment variable to hold the ELB hostname: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].hostname}" service -n kong kong-proxy) -``` - -> Note: It may take some time for Amazon to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## TLS configuration - -Versions of Kong prior to 2.0.0 default to using [the "modern" cipher suite -list](https://wiki.mozilla.org/Security/Server_Side_TLS). This is not -compatible with ELBs when the ELB terminates TLS at the edge and establishes a -new session with Kong. This error will appear in Kong's logs: - -``` -*7961 SSL_do_handshake() failed (SSL: error:1417A0C1:SSL routines:tls_post_process_client_hello:no shared cipher) while SSL handshaking -``` - -To correct this issue, set `KONG_SSL_CIPHER_SUITE=intermediate` in your -environment variables. diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/gke.md b/app/kubernetes-ingress-controller/1.0.x/deployment/gke.md deleted file mode 100644 index 8d4c51c0b94d..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/gke.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Kong Ingress on Google Kubernetes Engine (GKE) ---- - -## Requirements - -1. A fully functional GKE cluster. 
- The easiest way to do this is to do it via the web UI: - Go to Google Cloud's console > Kubernetes Engine > Cluster > - Create a new cluster. - This documentation has been tested on a zonal cluster in - europe-west-4a using 1.10.5-gke.4 as Master version. - The default pool has been assigned 2 nodes of kind 1VCPU - with 3.75GB memory (default setting). - The OS used is COS (Container Optimized OS) and the auto-scaling - has been enabled. Default settings are being used except for - `HTTP load balancing` which has been disabled (you probably want to use - Kong features for this). For more information on GKE clusters, - refer to - [the GKE documentation](https://cloud.google.com/kubernetes-engine/docs/). -1. If you wish to use a static IP for Kong, you have to reserve a static IP - address (in Google Cloud's console > VPC network > - External IP addresses). For information, - you must create a regional IP - global is not supported as `loadBalancerIP` yet) -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the GKE Kubernetes - cluster we will work on. For information, you can associate a new `kubectl` - context by using: - - ```bash - gcloud container clusters get-credentials --zone --project - ``` - -## Update User Permissions - -> Because of [the way Kubernetes Engine checks permissions -when you create a Role or ClusterRole](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control), you must -first create a RoleBinding that grants you all of -the permissions included in the role you want to create. -An example workaround is to create a RoleBinding that -gives your Google identity a cluster-admin role -before attempting to create additional Role or -ClusterRole permissions. -This is a known issue in RBAC in Kubernetes and -Kubernetes Engine versions 1.6 and -later. - -A fast workaround: - -```yaml - -echo -n " -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: cluster-admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: User - name: # usually the Google account - # e.g.: example@testorg.com - namespace: kube-system" | kubectl apply -f - - -``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. 
-Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Google to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/k4k8s-enterprise.md b/app/kubernetes-ingress-controller/1.0.x/deployment/k4k8s-enterprise.md deleted file mode 100644 index a22dec736ddb..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/k4k8s-enterprise.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Kong for Kubernetes Enterprise ---- - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. - -## Prerequisites - -Before we can deploy Kong, we need to satisfy one prerequisite: create a license -secret. - -To create this secret, provision the `kong` namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -Enterprise version requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -If you do not have one, please contact your sales representative. -Save the license file temporarily to disk with filename `license` -and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -## Installers - -Once the secret is in-place, we can proceed with installation. - -Kong for Kubernetes can be installed using an installer of -your choice: - -### YAML manifests - -Execute the following to install Kong for Kubernetes Enterprise using YAML -manifests: - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless-k4k8s-enterprise.yaml -``` - -It takes a few minutes the first time this setup is done. 
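-
-If you would rather wait for the rollout to finish than poll manually, one
-option (a convenience only; the `ingress-kong` Deployment name comes from the
-stock manifest) is:
-
-```bash
-# Block until the ingress-kong Deployment reports all replicas ready,
-# giving up after five minutes.
-$ kubectl rollout status deployment/ingress-kong -n kong --timeout=300s
-```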
- -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-6ffcf8c447-5qv6z 2/2 Running 1 44m -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.254.78 35.233.198.16 80:32697/TCP,443:32365/TCP 22h -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP address assigned to the service. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for your cloud provider to actually associate the -IP address to the `kong-proxy` Service. - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes Enterprise: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/deploy/manifests/enterprise-k8s -``` - -You can use the above URL as a base kustomization and build on top of it -as well. - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong \ - --name demo --namespace kong \ - --values values.yaml - -# Helm 3 -$ helm install kong/kong --generate-name - --namespace kong \ - -f values.yaml \ - --set ingressController.installCRDs=false -``` - -### Example values.yaml -``` -image: - repository: kong/kong-gateway - tag: 2.2.1.0-alpine -env: - LICENSE_DATA: - valueFrom: - secretKeyRef: - name: kong-enterprise-license - key: license -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes Enterprise - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/k4k8s.md b/app/kubernetes-ingress-controller/1.0.x/deployment/k4k8s.md deleted file mode 100644 index 429d6cc69dfe..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/k4k8s.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Kong for Kubernetes ---- - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. 
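-
-In a running installation both components are separate containers in the same
-Pod. A quick way to see this (assuming the stock manifests, which create an
-`ingress-kong` Deployment in the `kong` namespace) is:
-
-```bash
-# Print the container names in the ingress-kong Deployment; the Kong proxy
-# and the controller run side by side in one Pod.
-$ kubectl get deployment ingress-kong -n kong \
-    -o jsonpath='{.spec.template.spec.containers[*].name}'
-```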
- -## Installers - -Kong for Kubernetes can be installed using an installer of -your choice. - -Once you've installed Kong for Kubernetes, -jump to the [next section](#using-kong-for-kubernetes) -on using it. - -### YAML manifests - -Please pick one of the following guides depending on your platform: - -- [Minikube](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/) -- [Google Kubernetes Engine(GKE) by Google](/kubernetes-ingress-controller/{{page.release}}/deployment/gke/) -- [Elastic Kubernetes Service(EKS) by Amazon](/kubernetes-ingress-controller/{{page.release}}/deployment/eks/) -- [Azure Kubernetes Service(AKS) by Microsoft](/kubernetes-ingress-controller/{{page.release}}/deployment/aks/) - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/deploy/manifests/base -``` - -You can use the above URL as a base kustomization and build on top of it -to make it suite better for your cluster and use-case. - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes - -Once you've installed Kong for Kubernetes, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/kong-enterprise.md b/app/kubernetes-ingress-controller/1.0.x/deployment/kong-enterprise.md deleted file mode 100644 index e2f7648a283c..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/kong-enterprise.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -This guide walks through setting up the {{site.kic_product_name}} using Kong -Enterprise. This architecture is described in detail in [this doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise/). - -We assume that we start from scratch and you don't have {{site.ee_product_name}} -deployed. For the sake of simplicity, we will deploy {{site.ee_product_name}} and -its database in Kubernetes itself. You can safely run them outside -Kubernetes as well. 
- -## Prerequisites - -Before we can deploy the {{site.kic_product_name}} with {{site.ee_product_name}}, -we need to satisfy the following prerequisites: -- [{{site.ee_product_name}} License secret](#kong-enterprise-license-secret) -- [{{site.ee_product_name}} bootstrap password](#kong-enterprise-bootstrap-password) - -In order to create these secrets, let's provision the `kong` -namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -{{site.ee_product_name}} requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -Save the license file temporarily to disk and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -### {{site.ee_product_name}} bootstrap password - -Next, we need to create a secret containing the password using which we can login into Kong Manager. -Please replace `cloudnative` with a random password of your choice and note it down. - -```bash -$ kubectl create secret generic kong-enterprise-superuser-password -n kong --from-literal=password=cloudnative -secret/kong-enterprise-superuser-password created -``` - -Once these are created, we are ready to deploy {{site.ee_product_name}} -Ingress Controller. - -## Install - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/single/all-in-one-postgres-enterprise.yaml -``` - -It takes a little while to bootstrap the database. -Once bootstrapped, you should see the {{site.kic_product_name}} running with -{{site.ee_product_name}} as its core: - -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-548b9cff98-n44zj 2/2 Running 0 21s -kong-migrations-pzrzz 0/1 Completed 0 4m3s -postgres-0 1/1 Running 0 4m3s -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-admin LoadBalancer 10.63.255.85 34.83.95.105 80:30574/TCP 4m35s -kong-manager LoadBalancer 10.63.247.16 34.83.242.237 80:31045/TCP 4m34s -kong-proxy LoadBalancer 10.63.242.31 35.230.122.13 80:32006/TCP,443:32007/TCP 4m34s -kong-validation-webhook ClusterIP 10.63.240.154 443/TCP 4m34s -postgres ClusterIP 10.63.241.104 5432/TCP 4m34s - -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP assigned to the three LoadBalancer type services. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. If you are running Minikube, you will not get an -external IP address. - -### Setup Kong Manager - -Next, if you browse to the IP address or host of the `kong-manager` service in your Browser, -which in our case is `http://34.83.242.237`. -Kong Manager should load in your browser. -Try logging in to the Manager with the username `kong_admin` -and the password you supplied in the prerequisite, it should fail. -The reason being we've not yet told Kong Manager where it can find the Admin API. - -Let's set that up. 
We will take the External IP address of `kong-admin` service and -set the environment variable `KONG_ADMIN_API_URI`: - -```bash -KONG_ADMIN_IP=$(kubectl get svc -n kong kong-admin --output=jsonpath='{.status.loadBalancer.ingress[0].ip}') -kubectl patch deployment -n kong ingress-kong -p "{\"spec\": { \"template\" : { \"spec\" : {\"containers\":[{\"name\":\"proxy\",\"env\": [{ \"name\" : \"KONG_ADMIN_API_URI\", \"value\": \"${KONG_ADMIN_IP}\" }]}]}}}}" -``` - -It will take a few minutes to roll out the updated deployment and once the new -`ingress-kong` pod is up and running, you should be able to log into the Kong Manager UI. - -As you follow along with other guides on how to use your newly deployed the {{site.kic_product_name}}, -you will be able to browse Kong Manager and see changes reflected in the UI as Kong's -configuration changes. - -## Using Kong for Kubernetes with {{site.ee_product_name}} - -Let's setup an environment variable to hold the IP address of `kong-proxy` service: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. - -## Customizing by use-case - -The deployment in this guide is a point to start using Ingress Controller. -Based on your existing architecture, this deployment will require custom -work to make sure that it needs all of your requirements. - -In this guide, there are three load-balancers deployed for each of -Kong Proxy, Kong Admin and Kong Manager services. It is possible and -recommended to instead have a single Load balancer and then use DNS names -and Ingress resources to expose the Admin and Manager services outside -the cluster. diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/minikube.md b/app/kubernetes-ingress-controller/1.0.x/deployment/minikube.md deleted file mode 100644 index b6f0769f9767..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/minikube.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Kong Ingress on Minikube ---- - -## Setup Minikube - -1. Install [`minikube`](https://github.com/kubernetes/minikube) - - Minikube is a tool that makes it easy to run Kubernetes locally. - Minikube runs a single-node Kubernetes cluster inside a VM on your laptop - for users looking to try out Kubernetes or develop with it day-to-day. - -1. Start `minikube` - - ```bash - minikube start - ``` - - It will take a few minutes to get all resources provisioned. 
- - ```bash - kubectl get nodes - ``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. -Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -> Note: this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -```bash -$ export PROXY_IP=$(minikube service -n kong kong-proxy --url | head -1) -# If installed by helm, service name would be "-kong-proxy". -# $ export PROXY_IP=$(minikube service -kong-proxy --url | head -1) -$ echo $PROXY_IP -http://192.168.99.100:32728 -``` - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.0.x/deployment/overview.md b/app/kubernetes-ingress-controller/1.0.x/deployment/overview.md deleted file mode 100644 index 5a37579b5a99..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/deployment/overview.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Installing and Configuring ---- - -## Getting started - -If you are getting started with Kong for Kubernetes, -install it on Minikube using our Minikube [setup guide](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/). - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## Overview - -The {{site.kic_product_name}} can be installed on a local, managed -or any Kubernetes cluster which supports a service of type `LoadBalancer`. - -As explained in the [deployment document](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/), there -are a variety of configurations and runtimes for the {{site.kic_product_name}}. - -The following sections detail on deployment steps for all the different -runtimes: - -## Kong for Kubernetes - - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. 
It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s/) to deploy Kong for Kubernetes -using an installation method of your choice. - -## Kong for Kubernetes Enterprise - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) to deploy Kong for Kubernetes -Enterprise if you have purchased or are trying out {{site.ee_product_name}}. - -## Kong for Kubernetes with {{site.ee_product_name}} - -Kong for Kubernetes can integrate with {{site.ee_product_name}} to -provide a single pane of visibility across all of your services -that are running in Kubernetes and non-Kubernetes environments. - -This [guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise/) goes into details of -the architecture and how one can set that up. - -## Admission Controller - -The {{site.kic_product_name}} also ships with a Validating -Admission Controller that -can be enabled to verify KongConsumer, KongPlugin and Secret -resources as they are created. -Please follow the [admission-webhook](/kubernetes-ingress-controller/{{page.release}}/deployment/admission-webhook/) deployment -guide to set it up. diff --git a/app/kubernetes-ingress-controller/1.0.x/faq.md b/app/kubernetes-ingress-controller/1.0.x/faq.md deleted file mode 100644 index 8f401a8b6e3c..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/faq.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: FAQs ---- - -### Why endpoints and not services? - -The {{site.kic_product_name}} does not use -[Services][k8s-service] to route traffic -to the pods. Instead, it uses the Endpoints API -to bypass [kube-proxy][kube-proxy] -to allow Kong features like session affinity and -custom load balancing algorithms. -It also removes overhead -such as conntrack entries for iptables DNAT. - -### Is it possible to create consumers using the Admin API? - -From version 0.5.0 onwards, the {{site.kic_product_name}} tags each entity -that it manages inside Kong's database and only manages the entities that -it creates. -This means that if consumers and credentials are created dynamically, they -won't be deleted by the Ingress Controller. - -[k8s-service]: https://kubernetes.io/docs/concepts/services-networking/service -[kube-proxy]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/cert-manager.md b/app/kubernetes-ingress-controller/1.0.x/guides/cert-manager.md deleted file mode 100644 index f610f8ae248e..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/cert-manager.md +++ /dev/null @@ -1,372 +0,0 @@ ---- -title: Using cert-manager for automated TLS certificate ---- - -This guide will walk through steps to set up the {{site.kic_product_name}} with -cert-manager to automate certificate management using Let's Encrypt. -Any ACME-based CA can be used in-place of Let's Encrypt as well. - -## Before you begin - -You will need the following: - -- Kubernetes cluster that can provision an IP address that is routable from - the Internet. 
If you don't have one, you can use GKE or any managed k8s - cloud offering. -- A domain name for which you control the DNS records. - This is necessary so that - Let's Encrypt can verify the ownership of the domain and issue a certificate. - In the current guide, we use `example.com`, please replace this with a domain - you control. - -This tutorial was written using Google Kubernetes Engine. - -## Set up the {{site.kic_product_name}} {#set-up-kic} - -Execute the following to install the Ingress Controller: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.example.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -## Set up cert-manager - -Please follow cert-manager's [documentation](https://cert-manager.io/docs/installation/) -on how to install cert-manager onto your cluster. - -Once installed, verify all the components are running using: - -```bash -kubectl get all -n cert-manager -NAME READY STATUS RESTARTS AGE -pod/cert-manager-86478c5ff-mkhb9 1/1 Running 0 23m -pod/cert-manager-cainjector-65dbccb8b6-6dnjl 1/1 Running 0 23m -pod/cert-manager-webhook-78f9d55fdf-5wcnp 1/1 Running 0 23m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/cert-manager-webhook ClusterIP 10.63.240.251 443/TCP 23m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/cert-manager 1 1 1 1 23m -deployment.apps/cert-manager-cainjector 1 1 1 1 23m -deployment.apps/cert-manager-webhook 1 1 1 1 23m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cert-manager-86478c5ff 1 1 1 23m -replicaset.apps/cert-manager-cainjector-65dbccb8b6 1 1 1 23m -replicaset.apps/cert-manager-webhook-78f9d55fdf 1 1 1 23m -``` - -## Set up your application - -Any HTTP-based application can be used, for the purpose of the demo, install -the following echo server: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Set up DNS - -Get the IP address of the load balancer for Kong: - -```bash -$ kubectl get service -n kong kong-proxy -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 35.233.170.67 80:31929/TCP,443:31408/TCP 58d -``` - -To get only the IP address: - -```bash -$ kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy -35.233.170.67 -``` - -Please note that the IP address in your case will be different. - -Next, setup a DNS records to resolve `proxy.example.com` to the -above IP address: - -```bash -$ dig +short proxy.example.com -35.233.170.67 -``` - -Next, setup a CNAME DNS record to resolve `demo.example.com` to -`proxy.example.com`. - -```bash -$ dig +short demo.example.com -proxy.example.com. 
-35.233.170.67 -``` - -## Expose your application to the Internet - -Setup an Ingress rule to expose the application: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: demo.example.com - http: - paths: - - path: / - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -Access your application: - -```bash -$ curl -I demo.example.com -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 21:14:45 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 -``` - -## Request TLS Certificate from Let's Encrypt - -First, setup a ClusterIssuer for cert-manager - -```bash -$ echo "apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-prod - namespace: cert-manager -spec: - acme: - email: user@example.com #please change this - privateKeySecretRef: - name: letsencrypt-prod - server: https://acme-v02.api.letsencrypt.org/directory - solvers: - - http01: - ingress: - class: kong" | kubectl apply -f - -clusterissuer.cert-manager.io/letsencrypt-prod configured -``` - -*Note*: If you run into issues configuring this, -be sure that the group (`cert-manager.io`) and -version (`v1`) match those in the output of -`kubectl describe crd clusterissuer`. -This directs cert-manager which CA authority to use to issue the certificate. - -Next, update your Ingress resource to provision a certificate and then use it: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/tls-acme: "true" - cert-manager.io/cluster-issuer: letsencrypt-prod - kubernetes.io/ingress.class: kong -spec: - tls: - - secretName: demo-example-com - hosts: - - demo.example.com - rules: - - host: demo.example.com - http: - paths: - - path: / - backend: - serviceName: echo - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo-example-com configured -``` - -Things to note here: - -- The annotation `kubernetes.io/tls-acme` is set to `true`, informing - cert-manager that it should provision a certificate for hosts in this - Ingress using ACME protocol. -- `certmanager.k8s.io/cluster-issuer` is set to `letsencrypt-prod`, directing - cert-manager to use Let's Encrypt's production server to provision a TLS - certificate. -- `tls` section of the Ingress directs the {{site.kic_product_name}} to use the - secret `demo-example-com` to encrypt the traffic for `demo.example.com`. - This secret will be created by cert-manager. - -Once you update the Ingress resource, cert-manager will start provisioning -the certificate and in sometime the certificate will be available for use. 
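-
-For a quick readiness check while you wait (the exact columns vary slightly
-between cert-manager versions), you can list the Certificate resource and
-confirm that the TLS Secret has been created:
-
-```bash
-# READY flips to True once the ACME order completes and the signed
-# certificate is stored in the demo-example-com Secret.
-$ kubectl get certificate demo-example-com
-$ kubectl get secret demo-example-com
-```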
- -You can track the progress of certificate issuance: - -```bash -$ kubectl describe certificate demo-example-com -Name: demo-example-com -Namespace: default -Labels: -Annotations: -API Version: certmanager.k8s.io/v1 -Kind: Certificate -Metadata: - Creation Timestamp: 2019-06-21T20:41:54Z - Generation: 1 - Owner References: - API Version: extensions/v1beta1 - Block Owner Deletion: true - Controller: true - Kind: Ingress - Name: demo-example-com - UID: 261d15d3-9464-11e9-9965-42010a8a01ad - Resource Version: 19561898 - Self Link: /apis/certmanager.k8s.io/v1/namespaces/default/certificates/demo-example-com - UID: 014d3f1d-9465-11e9-9965-42010a8a01ad -Spec: - Acme: - Config: - Domains: - demo.example.com - Http 01: - Dns Names: - demo.example.com - Issuer Ref: - Kind: ClusterIssuer - Name: letsencrypt-prod - Secret Name: demo-example-com -Status: - Conditions: - Last Transition Time: 2019-06-21T20:42:20Z - Message: Certificate is up to date and has not expired - Reason: Ready - Status: True - Type: Ready - Not After: 2019-09-19T19:42:19Z -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Generated 53m cert-manager Generated new private key - Normal GenerateSelfSigned 53m cert-manager Generated temporary self signed certificate - Normal OrderCreated 53m cert-manager Created Order resource "demo-example-com-3811625818" - Normal OrderComplete 53m cert-manager Order "demo-example-com-3811625818" completed successfully - Normal CertIssued 53m cert-manager Certificate issued successfully -``` - -## Test HTTPS - -Once all is in place, you can use HTTPS: - -```bash -$ curl -v https://demo.example.com -* Rebuilt URL to: https://demo.example.com/ -* Trying 35.233.170.67... -* TCP_NODELAY set -* Connected to demo.example.com (35.233.170.67) port 443 (#0) -* ALPN, offering h2 -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/cert.pem - CApath: none -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES256-GCM-SHA384 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=demo.example.com -* start date: Jun 21 19:42:19 2019 GMT -* expire date: Sep 19 19:42:19 2019 GMT -* subjectAltName: host "demo.example.com" matched cert's "demo.example.com" -* issuer: C=US; O=Let's Encrypt; CN=Let's Encrypt Authority X3 -* SSL certificate verify ok. 
-> GET / HTTP/1.1 -> Host: demo.example.com -> User-Agent: curl/7.54.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Content-Type: text/plain; charset=UTF-8 -< Transfer-Encoding: chunked -< Connection: keep-alive -< Date: Fri, 21 Jun 2019 21:37:43 GMT -< Server: echoserver -< X-Kong-Upstream-Latency: 1 -< X-Kong-Proxy-Latency: 1 -< Via: kong/1.1.2 -< - - -Hostname: echo-d778ffcd8-52ddj - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-9w4t - pod name: echo-d778ffcd8-52ddj - pod namespace: default - pod IP:10.60.2.246 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.2.239 - method=GET - real path=/ - query= - request_version=1.1 - request_scheme=http - request_uri=http://demo.example.com:8080/ - -Request Headers: - accept=*/* - connection=keep-alive - host=demo.example.com - user-agent=curl/7.54.0 - x-forwarded-for=10.138.0.6 - x-forwarded-host=demo.example.com - x-forwarded-port=8443 - x-forwarded-proto=https - x-real-ip=10.138.0.6 - -Request Body: - -no body in request- -``` - -Et voilà ! You've secured your API with HTTPS -with the {{site.kic_product_name}} and cert-manager. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/configure-acl-plugin.md b/app/kubernetes-ingress-controller/1.0.x/guides/configure-acl-plugin.md deleted file mode 100644 index 2ef8aecd4249..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/configure-acl-plugin.md +++ /dev/null @@ -1,755 +0,0 @@ ---- -title: Configuring ACL Plugin ---- - -This guide walks through configuring the Kong ACL Plugin. The ACL Plugin -requires the use of at least one Authentication plugin. This example will use -the JWT Auth Plugin - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. 
- -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create two Ingress rules to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Test the Ingress rules: - -```bash -$ curl -i $PROXY_IP/get -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -``` - -## Add JWT authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. Let's enable JWT authentication - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: app-jwt -plugin: jwt -" | kubectl apply -f - -``` - -Now let's associate the plugin to the Ingress rules we created earlier. - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Any requests matching the proxying rules for `demo-get` and `demo` post will -now require a valid JWT and the consumer for the JWT to be associate with the -right ACL. 
-
-```bash
-$ curl -i $PROXY_IP/get
-
-HTTP/1.1 401 Unauthorized
-Date: Mon, 06 Apr 2020 07:27:44 GMT
-Content-Type: application/json; charset=utf-8
-Connection: keep-alive
-Content-Length: 50
-X-Kong-Response-Latency: 2
-Server: kong/2.0.2
-
-
-{"message":"Unauthorized"}
-
-$ curl -i --data "foo=bar" -X POST $PROXY_IP/post
-
-HTTP/1.1 401 Unauthorized
-Date: Mon, 06 Apr 2020 07:27:44 GMT
-Content-Type: application/json; charset=utf-8
-Connection: keep-alive
-Content-Length: 50
-X-Kong-Response-Latency: 2
-Server: kong/2.0.2
-
-
-{"message":"Unauthorized"}
-```
-
-You should get a 401 response telling you that the request is not authorized.
-
-## Provision Consumers
-
-Let's provision two KongConsumer resources:
-
-```bash
-$ echo "
-apiVersion: configuration.konghq.com/v1
-kind: KongConsumer
-metadata:
-  name: admin
-  annotations:
-    kubernetes.io/ingress.class: kong
-username: admin
-" | kubectl apply -f -
-
-$ echo "
-apiVersion: configuration.konghq.com/v1
-kind: KongConsumer
-metadata:
-  name: plain-user
-  annotations:
-    kubernetes.io/ingress.class: kong
-username: plain-user
-" | kubectl apply -f -
-```
-
-## Secrets
-
-Next, let's provision some Secrets for the KongConsumers to reference. Each
-ACL group and each JWT public key needs its own Secret.
-The credential type is specified in the `kongCredType` field; in this
-case we'll be using `jwt` and `acl`. You can create the Secrets using any
-other method as well.
-
-The JWT signing algorithm is set in the `algorithm` field. If you are using a
-public key, as in this example, store it in the `rsa_public_key` field; if you
-are using a secret signing key, use the `secret` field instead. The last field
-to set when using `RS256` or `ES256` is the `key` field, which must match the
-`iss` (issuer) claim of the JWT you will be sending. You can check this value
-by decoding your JWT over at [https://jwt.io](https://jwt.io).
-
-Since we are using the Secret resource, Kubernetes will store the
-JWT signing key and ACL group for us.
-
-### JWT signing key
-
-```bash
-# create secret for jwt public key
-$ kubectl create secret \
-  generic app-admin-jwt \
-  --from-literal=kongCredType=jwt \
-  --from-literal=key="admin-issuer" \
-  --from-literal=algorithm=RS256 \
-  --from-literal=secret="dummy" \
-  --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY-----
-  MIIBIjA....
-  -----END PUBLIC KEY-----"
-
-# create a second secret with a different key
-$ kubectl create secret \
-  generic app-user-jwt \
-  --from-literal=kongCredType=jwt \
-  --from-literal=key="user-issuer" \
-  --from-literal=algorithm=RS256 \
-  --from-literal=secret="dummy" \
-  --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY-----
-  qwerlkjqer....
-  -----END PUBLIC KEY-----"
-```
-
-Even though the `secret` field is not used with the `RS256` or `ES256`
-algorithms, validation requires the field to be present, so give it a dummy
-value.
-
-## Assign the credentials
-
-For Kong to validate the ACL and JWT credentials, the Secrets need to be
-referenced by the KongConsumers we created earlier. Let's update those.
- -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt -" | kubectl apply -f - -``` - -## Use the credential - -Now to use a JWT to pass authentication. Let's store the user and admin jwt's -in some environment variables. `USER_JWT` and `ADMIN_JWT`. If you are using -an identity provider, you should be able to login and get out a JWT from their -API. If you are generating your own, go through the process of generating your -own. - -Let's test the get route - -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK 
-Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - - -``` - -## Adding ACL's - -The JWT plugin doesn't provide the ability to authroize a given issuer to a -given ingress. To do this we need to use the ACL plugin. Let's create an admin -ACL config - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: admin-acl -plugin: acl -config: - whitelist: ['app-admin'] -" | kubectl apply -f - -``` - -Then let's create a user ACL config. We want our admin to be able to access -the same resources as the user, so let's make sure we include them in the -whitelist. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: plain-user-acl -plugin: acl -config: - whitelist: ['app-user','app-admin'] -" | kubectl apply -f - -``` - -Next let's create the secrets that will define the ACL groups. - -```bash -# create secrets for acl groups -$ kubectl create secret \ - generic app-admin-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-admin - -$ kubectl create secret \ - generic app-user-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-user -``` - -After we create the secrets, the consumers need to be updated to reference the -ACL credentials - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt - - app-admin-acl -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt - - app-user-acl -" | kubectl apply -f - -``` - -The last thing to configure is the ingress to use the new plguins. Note, if you -set more than one ACL plugin, the last one supplied will be the only one -evaluated. - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt,plain-user-acl - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt,admin-acl - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Now let's test it. 
- -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-user", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post -HTTP/1.1 403 Forbidden -Date: Mon, 06 Apr 2020 07:11:59 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 45 -X-Kong-Response-Latency: 1 -Server: kong/2.0.2 - -{"message":"You cannot consume this service"} -``` - -The `plain-user` user is not in the `admin-acl` whitelist, and is therefore -unauthorized to access the resource - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 1156 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 07:20:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 4 -X-Kong-Proxy-Latency: 4 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} -``` diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/configuring-custom-entities.md b/app/kubernetes-ingress-controller/1.0.x/guides/configuring-custom-entities.md deleted file mode 100644 index 747286a87864..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/configuring-custom-entities.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Configuring Custom Entities 
----
-
-This is an **advanced-level** guide for users who use custom entities in Kong.
-Most users do not need this feature.
-
-Kong has in-built extensibility with its plugin architecture.
-Plugins in Kong have a `config` property where users can store configuration
-for any custom plugin, and this suffices in most use cases.
-In some use cases, plugins define custom entities to store additional
-configuration outside the plugin instance itself.
-This guide elaborates on how such custom entities can be used with the Kong
-Ingress Controller.
-
-> Note: All entities shipped with Kong are supported by the
-{{site.kic_product_name}} out of the box. This guide applies only if you have a
-custom entity in your plugin. To check if your plugin contains a custom entity,
-look for a `daos.lua` file in its source code.
-Custom plugins have first-class support in the {{site.kic_product_name}}
-via the `KongPlugin` CRD.
-Please read [the custom plugin guide](/kubernetes-ingress-controller/{{page.release}}/guides/setting-up-custom-plugins/) instead
-if you are only using custom plugins.
-
-## Caveats
-
-- The feature discussed in this guide applies only to DB-less deployments of
-  Kong. It is not supported for deployments where Kong is used with a
-  database or in hybrid mode.
-  For these deployments, configure custom entities directly using Kong's Admin
-  API.
-- Custom entities which have a foreign relation with other core entities in Kong
-  are not supported. Only entities which can exist by themselves and then
-  be referenced via plugin configuration are supported.
-
-## Creating a JSON representation of the custom entity
-
-In this section, we will learn how to create a JSON representation of
-a custom entity.
-
-Suppose you have a custom entity with the following schema in your plugin source:
-
-```lua
-{
-  name = "xkcds",
-  primary_key = { "id" },
-  cache_key = { "name" },
-  endpoint_key = "name",
-  fields = {
-    { id = typedefs.uuid },
-    {
-      name = {
-        type = "string",
-        required = true,
-        unique = true,
-      },
-    },
-    {
-      url = {
-        type = "string",
-        required = true,
-      },
-    },
-    { created_at = typedefs.auto_timestamp_s },
-    { updated_at = typedefs.auto_timestamp_s },
-  },
-}
-```
-
-An instance of such an entity would look like:
-
-```json
-{
-  "id": "385def6e-3059-4929-bb12-d205e97284c5",
-  "name": "Bobby Drop Tables",
-  "url": "https://xkcd.com/327/"
-}
-```
-
-Multiple instances of such an entity are represented as follows:
-
-```json
-{
-  "xkcds": [
-    {
-      "id": "385def6e-3059-4929-bb12-d205e97284c5",
-      "name": "bobby_tables",
-      "url": "https://xkcd.com/327/"
-    },
-    {
-      "id": "d079a632-ac8d-4a9a-860c-71de82e8fc11",
-      "name": "compiling",
-      "url": "https://xkcd.com/303/"
-    }
-  ]
-}
-```
-
-If you have more than one custom entity to configure, add each entity's name
-as a key at the root level of the JSON and use a JSON array containing that
-entity's instances as the value of the key (a combined file is sketched below).
-
-To configure custom entities in a DB-less instance of Kong,
-you first need to create such a JSON representation of your entities.
-
-## Configuring the custom entity secret
-
-Once you have the JSON representation, the next step is to store it
-inside a Kubernetes Secret.
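-
-For illustration, a combined file covering two entity types might look like
-the following sketch. The `xkcds` entry is taken from above; the second
-`quotes` entity and its fields are purely hypothetical stand-ins for whatever
-other custom entity your plugin defines:
-
-```json
-{
-  "xkcds": [
-    {
-      "id": "d079a632-ac8d-4a9a-860c-71de82e8fc11",
-      "name": "compiling",
-      "url": "https://xkcd.com/303/"
-    }
-  ],
-  "quotes": [
-    {
-      "id": "0b7cddbe-37a2-4d7e-8a7b-49e1b4e3f6d1",
-      "name": "works-on-my-machine",
-      "text": "It works on my machine."
-    }
-  ]
-}
-```
-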
-The following command assumes the file is named `entities.json`, but you can
-use any other filename as well:
-
-```bash
-$ kubectl create secret generic -n kong kong-custom-entities --from-file=config=entities.json
-secret/kong-custom-entities created
-```
-
-Some things to note:
-- The key inside the secret must be `config`. This is not configurable at the
-  moment.
-- The secret must be accessible by the Ingress Controller. The recommended
-  practice here is to install the secret in the same namespace in which Kong
-  is running.
-
-## Configure the Ingress Controller
-
-Once you have the secret containing the custom entities configured,
-you need to instruct the controller to read the secret and sync the custom
-entities to Kong.
-
-To do this, add the following environment variable to the
-`ingress-controller` container:
-
-```yaml
-env:
-- name: CONTROLLER_KONG_CUSTOM_ENTITIES_SECRET
-  value: kong/kong-custom-entities
-```
-
-The value of this environment variable takes the form `<namespace>/<secret-name>`.
-You need to configure this only once.
-
-This instructs the controller to watch the above secret and configure Kong
-with any custom entities present inside the secret.
-If you change the configuration and update the secret with different entities,
-the controller will dynamically fetch the updated secret and configure Kong.
-
-## Verification
-
-You can verify that the custom entity was actually created in Kong's memory
-by issuing a `GET /xkcds` request (the endpoint will differ based on the name
-of the entity) against Kong's Admin API.
-You can forward traffic from your local machine to the Kong Pod to access it:
-
-```bash
-$ kubectl port-forward -n kong KONG-POD-NAME 8444:8444
-```
-
-and in a separate terminal:
-
-```bash
-$ curl -k https://localhost:8444/xkcds
-```
-
-## Using the custom entity
-
-You can now reference the custom entity in any of your custom plugin's
-`config` object:
-
-```yaml
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: random-xkcd-header
-config:
-  xkcds:
-  - d079a632-ac8d-4a9a-860c-71de82e8fc11
-plugin: xkcd-header
-```
diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/configuring-fallback-service.md b/app/kubernetes-ingress-controller/1.0.x/guides/configuring-fallback-service.md
deleted file mode 100644
index 46d238e4b6ad..000000000000
--- a/app/kubernetes-ingress-controller/1.0.x/guides/configuring-fallback-service.md
+++ /dev/null
@@ -1,182 +0,0 @@
----
-title: Configuring a fallback service
----
-
-This guide walks through how to set up a fallback service using an Ingress
-resource. The fallback service will receive all requests that don't
-match against any of the defined Ingress rules.
-This can be useful for scenarios where you would like to return a 404 page
-to the end user if the user clicks on a dead link or inputs an incorrect URL.
-
-## Installation
-
-Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install
-the {{site.kic_product_name}} on your Kubernetes cluster.
-
-## Testing Connectivity to Kong
-
-This guide assumes that the `PROXY_IP` environment variable is
-set to contain the IP address or URL pointing to Kong.
-Please follow one of the
-[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable.
-
-If everything is set up correctly, making a request to Kong should return
-HTTP 404 Not Found.
- -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup a simple HTTP service in the -cluster and proxy it. - -```bash -$ echo ' -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fallback-svc -spec: - replicas: 1 - selector: - matchLabels: - app: fallback-svc - template: - metadata: - labels: - app: fallback-svc - spec: - containers: - - name: fallback-svc - image: hashicorp/http-echo - args: - - "-text" - - "This is not the path you are looking for. - Fallback service" - ports: - - containerPort: 5678 ---- -apiVersion: v1 -kind: Service -metadata: - name: fallback-svc - labels: - app: fallback-svc -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: 5678 - protocol: TCP - name: http - selector: - app: fallback-svc -' | kubectl apply -f - -``` - -Result: - -```bash -deployment.apps/fallback-svc created -service/fallback-svc created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup a fallback service - -Let's deploy another sample service service: - -```bash -$ kubectl apply -f https://bit.ly/fallback-svc -deployment.extensions/fallback-svc created -service/fallback-svc created -``` - -Next, let's set up an Ingress rule to make it the fallback service -to send all requests to it that don't match any of our Ingress rules: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: fallback - annotations: - kubernetes.io/ingress.class: kong -spec: - backend: - serviceName: fallback-svc - servicePort: 80 -" | kubectl apply -f - -``` - -## Test it - -Now send a request with a request property that doesn't match against -any of the defined rules: - -```bash -$ curl $PROXY_IP/random-path -This is not the path you are looking for. - Fallback service -``` - -The above message comes from the fallback service that was deployed in the -last step. - -Create more Ingress rules, some complicated regex based ones and -see how requests that don't match any rules, are forwarded to the -fallback service. - -You can also use Kong's request-termination plugin on the `fallback` -Ingress resource to terminate all requests at Kong, without -forwarding them inside your infrastructure. 
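-
-As a sketch of that last suggestion (the plugin name `fallback-404` and the
-response message below are illustrative; `request-termination` simply returns
-the configured status code and message for every request it sees):
-
-```bash
-$ echo "
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: fallback-404
-plugin: request-termination
-config:
-  status_code: 404
-  message: 'Not found'
-" | kubectl apply -f -
-
-# Attach the plugin to the fallback Ingress
-$ kubectl patch ingress fallback -p '{"metadata":{"annotations":{"konghq.com/plugins":"fallback-404"}}}'
-```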
diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/configuring-health-checks.md b/app/kubernetes-ingress-controller/1.0.x/guides/configuring-health-checks.md deleted file mode 100644 index 875525104609..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/configuring-health-checks.md +++ /dev/null @@ -1,353 +0,0 @@ ---- -title: Setting up Active and Passive health checks ---- - -In this guide, we will go through steps necessary to setup active and passive -health checking using the {{site.kic_product_name}}. This configuration allows -Kong to automatically short-circuit requests to specific Pods that are -mis-behaving in your Kubernetes Cluster. - -> Please make sure to use {{site.kic_product_name}} >= 0.6 as the previous -versions contain a [bug](https://github.com/hbagdi/go-kong/issues/6). - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy any requests yet. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Observe the headers and you can see that Kong has proxied the request correctly. - -## Setup passive health checking - -Now, let's setup passive HTTP health-check for our service. -All health-checking is done at Service-level and not Ingress-level. 
- -Add the following KongIngress resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking created -``` - -Here, we are configuring Kong to short-circuit requests to a pod -if a pod throws 3 consecutive errors. - -Next, associate the KongIngress resource with `httpbin` service: - -```bash -$ kubectl patch svc httpbin -p '{"metadata":{"annotations":{"konghq.com/override":"demo-health-checking"}}}' -service/httpbin patched -``` - -Now, let's send some traffic to test if this works: - -Let's send 2 requests that represent a failure from upstream -and then send a request for 200. -Here we are using `/status/500` to simulate a failure from upstream. - -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Kong has not short-circuited because there were only two failures. -Let's send 3 requests and open the circuit, and then send a normal request. 
- -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 22:41:19 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} - -``` - -As we can see, Kong returns back a 503, representing that the service is -unavailable. Since we have only one pod of httpbin running in our cluster, -and that is throwing errors, Kong will not proxy anymore requests. - -Now we have a few options: - -- Delete the current httpbin pod; Kong will then proxy requests to the new - pod that comes in its place. -- Scale the httpbin deployment; Kong will then proxy requests to the new - pods and leave the short-circuited pod out of the loop. -- Manually change the pod health status in Kong using Kong's Admin API. - -These options highlight the fact that once a circuit is opened because of -errors, there is no way for Kong to close the circuit again. - -This is a feature which some services might need, where once a pod starts -throwing errors, manual intervention is necessary before that pod can -again handle requests. -To get around this, we can introduce active health-check, where each instance -of Kong actively probes pods to figure out if they are healthy or not. - -## Setup active health checking - -Let's update our KongIngress resource to use active health-checks: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - active: - healthy: - interval: 5 - successes: 3 - http_path: /status/200 - type: http - unhealthy: - http_failures: 1 - interval: 5 - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking configured -``` - -Here, we are configuring Kong to actively probe `/status/200` every 5 seconds. -If a pod is unhealthy (from Kong's perspective), -3 successful probes will change the status of the pod to healthy and Kong -will again start to forward requests to that pod. 
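-
-If you are curious how Kong classifies the individual targets at any point,
-you can ask the Admin API directly. This is a sketch: the namespace, Pod name,
-and exact upstream name depend on your deployment, so list the upstreams first
-and then query the health of the one backing the `httpbin` service:
-
-```bash
-# In one terminal, expose the Admin API of a Kong Pod
-$ kubectl port-forward -n kong KONG-POD-NAME 8444:8444
-
-# In another terminal, list the upstreams Kong knows about...
-$ curl -sk https://localhost:8444/upstreams
-
-# ...and inspect the health of the targets behind the relevant upstream
-$ curl -sk https://localhost:8444/upstreams/UPSTREAM-NAME-OR-ID/health
-```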
- -Now, the requests should flow once again: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Let's trip the circuit again by sending three requests that will return -500s from httpbin: - -```bash -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -``` - -Now, sending the following request will fail for about 15 seconds, -the duration it will take active healthchecks to re-classify -the httpbin pod as healthy again. - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 23:17:47 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} -``` - -After 15 seconds, you will see: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As we can see, active health-checks automatically marked a pod as healthy -when passive health-checks marked it unhealthy. - -## Bonus - -Scale the `httpbin` and `ingress-kong` deployments and observe how -multiple pods change the outcome of the above demo. - -Read more about health-checks and ciruit breaker in Kong's -[documentation](/gateway/latest/reference/health-checks-circuit-breakers). diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/configuring-https-redirect.md b/app/kubernetes-ingress-controller/1.0.x/guides/configuring-https-redirect.md deleted file mode 100644 index 513d0caba902..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/configuring-https-redirect.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Configuring https redirect ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -redirect HTTP request to HTTPS so that all communication -from the external world to your APIs and microservices is encrypted. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup HTTPS redirect - -To instruct Kong to redirect all HTTP requests matching this Ingress rule to -HTTPS, update its annotations to limit its protocols to HTTPS only and -issue a 301 redirect: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"https","konghq.com/https-redirect-status-code":"301"}}}' -ingress.extensions/demo patched -``` - -## Test it - -Now, making a plain-text HTTP request to Kong will result in a redirect -being issued from Kong: - -```bash -$ curl $PROXY_IP/foo/headers -I -HTTP/1.1 301 Moved Temporarily -Date: Tue, 06 Aug 2019 18:04:38 GMT -Content-Type: text/html -Content-Length: 167 -Connection: keep-alive -Location: https://35.197.125.63/foo/headers -Server: kong/1.2.1 -``` - -The `Location` header will contain the URL you need to use for an HTTPS -request. Please note that this URL will be different depending on your -installation method. You can also grab the IP address of the load balancer -fronting Kong and send a HTTPS request to test it. - -Let's test it: - -```bash -$ curl -k https://35.197.125.63/foo/headers -{ - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Host": "35.197.125.63", - "User-Agent": "curl/7.54.0", - "X-Forwarded-Host": "35.197.125.63" - } -} -``` - -We can see that Kong correctly serves the request only on HTTPS protocol -and redirects the user if plaint-text HTTP protocol is used. -We had to use `-k` flag in cURL to skip certificate validation as the -certificate served by Kong is a self-signed one. -If you are serving this traffic via a domain that you control and have -configured TLS properties for it, then the flag won't -be necessary. - -If you have a domain that you control but don't have TLS/SSL certificates -for it, please check out out -[Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager) guide which can get TLS -certificates setup for you automatically. And it's free, thanks to -Let's Encrypt! 
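-
-As a side note, instead of patching the Ingress imperatively as we did above,
-the same behavior can be declared directly in the Ingress manifest, which
-keeps the redirect configuration in version control. A sketch of the `demo`
-Ingress from this guide with the two annotations added inline:
-
-```bash
-$ echo '
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
-  name: demo
-  annotations:
-    konghq.com/strip-path: "true"
-    konghq.com/protocols: "https"
-    konghq.com/https-redirect-status-code: "301"
-    kubernetes.io/ingress.class: kong
-spec:
-  rules:
-  - http:
-      paths:
-      - path: /foo
-        backend:
-          serviceName: httpbin
-          servicePort: 80
-' | kubectl apply -f -
-```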
diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/getting-started-istio.md b/app/kubernetes-ingress-controller/1.0.x/guides/getting-started-istio.md deleted file mode 100644 index 19ef1a26341d..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/getting-started-istio.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: Running the Kong Ingress Controller with Istio ---- - -In this guide, you will: -* Install Istio v1.6.7 and Kong in your cluster. -* Deploy an example Istio-enabled application (_bookinfo_). -* Deploy an `Ingress` customized with a `KongPlugin` for the example application. -* Make several requests to the sample application via Kong and Istio. -* See the performance metrics of the sample application, provided by Istio. - -### Prerequisites -For this guide, you will need: - -* A Kubernetes v1.15 (or newer) cluster which can pull container images from public registries. For example, you can use: - * A managed Kubernetes cluster (AWS EKS, Google Cloud GKE, Azure AKS). - * Minikube. - * `microk8s` with the `dns` addon enabled. -* `kubectl` with admin access to the cluster. - -### Download Istio - -Download the Istio bundle at version 1.6.7: - -```console -$ curl -L https://istio.io/downloadIstio | env ISTIO_VERSION=1.6.7 sh - -... -... -Istio 1.6.7 Download Complete! - -Istio has been successfully downloaded into the istio-1.6.7 folder on your system. -... -... -``` - -### Install Istio Operator - -Invoke `istioctl` to deploy the Istio Operator to the Kubernetes cluster: - -```console -$ ./istio-1.6.7/bin/istioctl operator init -Using operator Deployment image: docker.io/istio/operator:1.6.7 -✔ Istio operator installed -✔ Installation complete -``` - -### Deploy Istio using Operator - -Deploy Istio using Istio Operator: - -```console -$ kubectl create namespace istio-system -namespace/istio-system created -``` -```console -$ kubectl apply -f - < 8000 -Forwarding from [::1]:8080 -> 8000 -... -``` - -Navigate your web browser to `http://localhost:8080/` You should be able to see a bookstore web application. Click -through any available links several times. As you hit 30 requests per minute (for example, by holding down the "Refresh" -key combination, e.g. `` or ``), you should obtain a `Kong Error - API rate limit exceeded` response. - -### See the connection graph in Kiali - -Connect to Kiali (the Istio dashboard): - -```console -$ # Keep the command below running in the background -$ kubectl port-forward service/kiali 20001:20001 -n istio-system -Forwarding from 127.0.0.1:20001 -> 20001 -Forwarding from [::1]:20001 -> 20001 -... -``` - -* Navigate your web browser to `http://localhost:20001/`. -* Log in using the default credentials (`admin`/`admin`). -* Choose _Workloads_ from the menu on the left. -* Select `my-istio-app` in the _Namespace_ drop-down menu. -* Click the _productpage-v1_ service name. -* Click the three dots button in the top-right corner of _Graph Overview_ and click _Show full graph_. -* Select `kong-istio` alongside `my-istio-app` in the _Namespace_ diagram. -* Observe a connection graph spanning from `example-kong-kong-proxy` through `productpage-v1` to the other sample -application services such as `ratings-v1` and `details-v1`. 
- -### See the metrics in Grafana - -Connect to Grafana (a dashboard frontend for Prometheus which has been deployed with Istio): - -```console -$ # Keep the command below running in the background -$ kubectl port-forward service/grafana 3000:3000 -n istio-system -Forwarding from 127.0.0.1:3000 -> 3000 -Forwarding from [::1]:3000 -> 3000 -... -``` - -* Navigate your web browser to `http://localhost:3000/`. -* Expand the dashboard selection drop-down menu from the top of the screen. Expand the `istio` directory and choose the -_Istio Workload Dashboard_ from the list. -* Choose _Namespace: my-istio-app_ and _Workload: productpage-v1_ from the drop-downs. -* Choose a timespan in the top-right of the page to include the time when you made requests to the sample application (e.g. _Last 1 hour_). -* Observe the incoming and outgoing request graphs reflecting actual requests from Kong to `productpage-v1`, and from `productpage-v1` to its backends. - -Note that the requests from the web browser to Kong are not reflected in inbound stats of `example-kong-kong-proxy` -because we've issued these requests by `kubectl port-forward`, thus bypassing the Istio proxy sidecar in Kong. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/getting-started.md b/app/kubernetes-ingress-controller/1.0.x/guides/getting-started.md deleted file mode 100644 index 2de9ca9d32fc..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/getting-started.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Getting started with the Kong Ingress Controller ---- - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return back -a HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.1.2 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy the request yet. - -## Set up an echo-server - -Setup an echo-server application to demonstrate how -to use the {{site.kic_product_name}}: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -This application just returns information about the -pod and details from the HTTP request. 
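-
-Before wiring up any Ingress rules, you can optionally confirm that the echo
-workload is ready; the Deployment and Service are both named `echo`, so a
-quick check (output omitted) could be:
-
-```bash
-$ kubectl get deployment echo
-$ kubectl get service echo
-```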
- -## Basic proxy - -Create an Ingress rule to proxy the echo-server created previously: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Fri, 21 Jun 2019 17:12:49 GMT -Server: echoserver -X-Kong-Upstream-Latency: 0 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 - - - -Hostname: echo-758859bbfb-txt52 - -Pod Information: - node name: minikube - pod name: echo-758859bbfb-txt52 - pod namespace: default - pod IP: 172.17.0.14 -<-- clipped --> -``` - -If everything is deployed correctly, you should see the above response. -This verifies that Kong can correctly route traffic to an application running -inside Kubernetes. - -## Using plugins in Kong - -Setup a KongPlugin resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -kongplugin.configuration.konghq.com/request-id created -``` - -Create a new Ingress resource which uses this plugin: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - konghq.com/plugins: request-id - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -The above resource directs Kong to execute the request-id plugin whenever -a request is proxied matching any rule defined in the resource. - -Send a request to Kong: - -```bash -$ curl -i -H "Host: example.com" $PROXY_IP/bar/sample -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:09:02 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 - - - -Hostname: echo-758859bbfb-cnfmx - -Pod Information: - node name: minikube - pod name: echo-758859bbfb-cnfmx - pod namespace: default - pod IP: 172.17.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=172.17.0.2 - method=GET - real path=/bar/sample - query= - request_version=1.1 - request_scheme=http - request_uri=http://example.com:8080/bar/sample - -Request Headers: - accept=*/* - connection=keep-alive - host=example.com - my-request-id=7250803a-a85a-48da-94be-1aa342ca276f#6 - user-agent=curl/7.54.0 - x-forwarded-for=172.17.0.1 - x-forwarded-host=example.com - x-forwarded-port=8000 - x-forwarded-proto=http - x-real-ip=172.17.0.1 - -Request Body: - -no body in request- -``` - -The `my-request-id` can be seen in the request received by echo-server. -It is injected by Kong as the request matches one -of the Ingress rules defined in `demo-example-com` resource. - -## Using plugins on Services - -Kong Ingress allows plugins to be executed on a service level, meaning -Kong will execute a plugin whenever a request is sent to a specific k8s service, -no matter which Ingress path it came from. 
- -Create a KongPlugin resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: rl-by-ip -config: - minute: 5 - limit_by: ip - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongplugin.configuration.konghq.com/rl-by-ip created -``` - -Next, apply the `konghq.com/plugins` annotation on the Kubernetes Service -that needs rate-limiting: - -```bash -kubectl patch svc echo \ - -p '{"metadata":{"annotations":{"konghq.com/plugins": "rl-by-ip\n"}}}' -``` - -Now, any request sent to this service will be protected by a rate-limit -enforced by Kong: - -```bash -$ curl -I $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:25:49 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 2 -X-Kong-Upstream-Latency: 0 -X-Kong-Proxy-Latency: 4 -Via: kong/1.1.2 - -$ curl -I -H "Host: example.com" $PROXY_IP/bar/sample -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:28:30 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 2 -Via: kong/1.1.2 -``` - -## Result - -This guide sets up the following configuration: - -```text -HTTP requests with /foo -> Kong enforces rate-limit -> echo server - -HTTP requests with /bar -> Kong enforces rate-limit + -> echo-server - on example.com injects my-request-id header -``` diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/overview.md b/app/kubernetes-ingress-controller/1.0.x/guides/overview.md deleted file mode 100644 index ca89c71096df..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/overview.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Guides ---- - -Follow one of the guides to learn more about how to use -the {{site.kic_product_name}}: - -- [Getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started/) with the {{site.kic_product_name}} -- [Getting started using Istio](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started-istio/) with the {{site.kic_product_name}} and Istio -- [Using KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) - This guide walks through setting up plugins in Kong using a declarative - approach. -- [Using KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource/) - This guide explains how the KongIngress resource can be used to change Kong - specific settings like load-balancing, health-checking and proxy behaviour. -- [Using KongConsumer and Credential Resources](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource/) - This guide walks through how Kubernetes native declarative configuration - can be used to dynamically provision credentials for authentication purposes - in the Ingress layer. -- [Using JWT and ACL KongPlugin resources](/kubernetes-ingress-controller/{{page.release}}/guides/configure-acl-plugin/) - This guides walks you through configuring the JWT plugin and ACL plugin for - authentication purposes at the Ingress layer -- [Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager/) - This guide walks through how to use cert-manager along with Kong Ingress - Controller to automate TLS certificate provisioning and using them - to encrypt your API traffic. 
-- [Configuring a fallback service](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-fallback-service/) - This guide walks through how to setup a fallback service using Ingress - resource. The fallback service will receive all requests that don't - match against any of the defined Ingress rules. -- [Using external service](/kubernetes-ingress-controller/{{page.release}}/guides/using-external-service/) - This guide shows how to expose services running outside Kubernetes via Kong, - using [External Name](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) - Services in Kubernetes. -- [Configuring HTTPS redirects for your services](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-https-redirect/) - This guide walks through how to configure the {{site.kic_product_name}} to - redirect HTTP request to HTTPS so that all communication - from the external world to your APIs and microservices is encrypted. -- [Using Redis for rate-limiting](/kubernetes-ingress-controller/{{page.release}}/guides/redis-rate-limiting/) - This guide walks through how to use Redis for storing rate-limit information - in a multi-node Kong deployment. -- [Integrate the {{site.kic_product_name}} with Prometheus/Grafana](/kubernetes-ingress-controller/{{page.release}}/guides/prometheus-grafana/) - This guide walks through the steps of how to deploy the {{site.kic_product_name}} - and Prometheus to obtain metrics for the traffic flowing into your - Kubernetes cluster. -- [Configuring circuit-breaker and health-checking](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-health-checks/) - This guide walks through the usage of Circuit-breaking and health-checking - features of the {{site.kic_product_name}}. -- [Setting up custom plugin](/kubernetes-ingress-controller/{{page.release}}/guides/setting-up-custom-plugins/) - This guide walks through - installation of a custom plugin into Kong using - ConfigMaps and Volumes. -- [Using ingress with gRPC](/kubernetes-ingress-controller/{{page.release}}/guides/using-ingress-with-grpc/) - This guide walks through how to use the {{site.kic_product_name}} with gRPC. -- [Setting up upstream mTLS](/kubernetes-ingress-controller/{{page.release}}/guides/upstream-mtls/) - This guide gives an overview of how to setup mutual TLS authentication - between Kong and your upstream server. -- [Preserving Client IP address](/kubernetes-ingress-controller/{{page.release}}/guides/preserve-client-ip/) - This guide gives an overview of different methods to preserve the Client - IP address. -- [Using KongClusterPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) - This guide walks through setting up plugins that can be shared across - Kubernetes namespaces. -- [Using Kong with Knative](/kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/) - This guide gives an overview of how to setup Kong as the Ingress point - for Knative workloads. -- [Exposing TCP-based service](/kubernetes-ingress-controller/{{page.release}}/guides/using-tcpingress/) - This guide gives an overview of how to use TCPIngress resource to expose - non-HTTP based services outside a Kubernetes cluster. -- [Using mtls-auth plugin](/kubernetes-ingress-controller/{{page.release}}/guides/using-mtls-auth-plugin/) - This guide gives an overview of how to use `mtls-auth` plugin and CA - certificates to authenticate requests using client certificates. 
-- [Using OpenID-connect plugin](/kubernetes-ingress-controller/{{page.release}}/guides/using-oidc-plugin/) - This guide walks through steps necessary to set up OIDC authentication. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/preserve-client-ip.md b/app/kubernetes-ingress-controller/1.0.x/guides/preserve-client-ip.md deleted file mode 100644 index 0a76e545bbf5..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/preserve-client-ip.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Preserving Client IP Address ---- - -Kong is usually deployed behind a Load Balancer (using a -Kubernetes Service of type `LoadBalancer`). This can result -in loss of actual Client IP address and Kong observing the IP address -of the Load Balancer as the client IP address. This guide lays -out different methods of solving this problem. - -Preserving the Client IP address in cloud behind Load-Balancers requires -configuration that is be specific to your use-case, cloud provider -and other architecture details. -[This document](https://kubernetes.io/docs/tutorials/services/source-ip/) -provides details on how networking works inside Kubernetes and explains -in detail of how various methods describe later in this document work. -It is recommended that you give this a read. - -Following methods are possible to preserve Client IP address: - -## ExternalTrafficPolicy: Local - -As explained in -[Kubernetes docs](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip), -setting `service.spec.externalTrafficPolicy` to `Local` preserves the Client -IP address. You don't need to change any configuration in Kong if you -are using this method to preserve Client IP address. - -Please note that this is not supported by all of the public Cloud providers. - -## Proxy Protocol - -If you have an L4 Load Balancer that supports Proxy Protocol, and you're -terminating TCP connections at the Load Balancer before passing traffic -onward to Kong, then you can configure Kong to pick up the Client IP -address via this protocol. - -Once you have configured the Load Balancer to use Proxy Protocol, you -need to set the following environment variables in Kong for Kong to -receive the Client IP from the Proxy Protocol header. - -- [`KONG_TRUSTED_IPS`](/gateway/latest/reference/configuration/#trusted_ips) -- [`KONG_PROXY_LISTEN`](/gateway/latest/reference/configuration/#proxy_listen) -- [`KONG_REAL_IP_HEADER`](/gateway/latest/reference/configuration/#real_ip_header) - -For example: - -``` -KONG_TRUSTED_IPS=0.0.0.0/0,::/0 # This trusts all IPs -KONG_PROXY_LISTEN="0.0.0.0:8000 proxy_protocol, 0.0.0.0:8443 ssl proxy_protocol" -KONG_REAL_IP_HEADER=proxy_protocol -``` - -## HTTP headers - -If you are using an L7 Load Balancer, i.e. HTTP requests are being terminated -at the Load Balancer, then you need to use `x-forwarded-for` or `x-real-ip` -header to preserve details of the connection between the Client and Load Balancer. 
- -You should configure the Load Balancer to inject these headers, and then -you need to set the following environment variables in Kong for Kong to pick up -the Client IP address from HTTP headers: - -- [`KONG_TRUSTED_IPS`](/gateway/latest/reference/configuration/#trusted_ips) -- [`KONG_REAL_IP_HEADER`](/gateway/latest/reference/configuration/#real_ip_header) -- Optional [`KONG_REAL_IP_RECURSIVE`](/gateway/latest/reference/configuration/#real_ip_recursive) - -Please note that if you are using an L7 Load Balancer with Kong, -you cannot use the `certificates` feature in Kong as the TLS session is -already established between the Client and Load Balancer. - -## Cloud-provider specific details - -For the major public clouds, follow are some additional -details that can help you preserve the client IP address: - -### GKE - -You can use `ExternalTrafficPolicy: Local` to preserve the Client IP address. - -### AKS - -You can use `ExternalTrafficPolicy: Local` to preserve the Client IP address. - -### EKS - -You have two options: - -- L4 Load Balancer - In this case, you need to use the Proxy Protocol method to preserve Client IP - address. -- L7 Load Balancer - In this case, you need to use the HTTP headers method to preserve the Client - IP address. - -The recommend Load Balancer type for AWS is NLB. -You can choose the type of Load Balancer using the following annotation: - -``` -service.beta.kubernetes.io/aws-load-balancer-type: nlb -``` - -Other useful annotations for AWS are: - -``` -service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp -service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*' -``` - -A complete list can be found -[here](https://gist.github.com/mgoodness/1a2926f3b02d8e8149c224d25cc57dc1). diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/prometheus-grafana.md b/app/kubernetes-ingress-controller/1.0.x/guides/prometheus-grafana.md deleted file mode 100644 index 2a0ea1f0b450..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/prometheus-grafana.md +++ /dev/null @@ -1,343 +0,0 @@ ---- -title: Integrate the Kong Ingress Controller with Prometheus/Grafana ---- - -The {{site.kic_product_name}} can give you visibility not only into how Kong is -performing but also gives visibilty into -how the services in your Kubernetes cluster are responding to the -inbound traffic. - -This how-to guide walks through the steps of how to configure Kong and -Prometheus to collect metrics from the {{site.kic_product_name}}. - -> Note: This guide was originally posted on Kong Inc's blog: -[https://konghq.com/blog/observability-kubernetes-kong/](https://konghq.com/blog/observability-kubernetes-kong) - -## Prerequisites - -You’ll need a few things before we can start: - -- **Kubernetes cluster**: You can use Minikube or a GKE cluster for the - purpose of this tutorial. We are running a GKE Kubernetes cluster v1.12.x. -- **Helm**: We will be using [Helm](https://helm.sh/) - to install all of our components. - Tiller should be installed on your k8s cluster and - Helm CLI should be available on your workstation. - You can follow Helm’s quickstart guide to set up helm. - -Once you have Kubernetes and Helm set up, please proceed. - -Caution: Settings here are tweaked to keep this guide simple. -These settings are not meant for production usage. - -## Install Prometheus and Grafana - -If you already have Prometheus and Grafana installed on your Kubernetes cluster, -you can skip these steps. 
- -### Prometheus - -First, we will install Prometheus with a -scrape interval of 10 seconds to have fine-grained data points for all metrics. -We’ll install both Prometheus and Grafana in a dedicated `monitoring` namespace. - -To install Prometheus, execute the following: - -```bash -$ kubectl create namespace monitoring -$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -$ helm install prometheus prometheus-community/prometheus --namespace monitoring --values https://bit.ly/2RgzDtg --version 11.0.3 -``` - -after prometheus installation, execute following command: -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/main/config/prometheus/monitor.yaml -``` - -### Grafana - -Grafana is installed with the following values for its Helm chart -(see comments for explanation): - -```yaml -persistence: - enabled: true # enable persistence using Persistent Volumes -datasources: - datasources.yaml: - apiVersion: 1 - Datasources: # configure Grafana to read metrics from Prometheus - - name: Prometheus - type: prometheus - url: http://prometheus-server # Since Prometheus is deployed in - access: proxy # same namespace, this resolves - # to the Prometheus Server we installed previous - isDefault: true # The default data source is Prometheus - -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'default' # Configure a dashboard provider file to - orgId: 1 # put Kong dashboard into. - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/default -dashboards: - default: - kong-dash: - gnetId: 7424 # Install the following Grafana dashboard in the - revision: 5 # instance: https://grafana.com/dashboards/7424 - datasource: Prometheus -``` - -To install Grafana, execute the following: - -```bash -$ helm repo add grafana https://grafana.github.io/helm-charts -$ helm install grafana grafana/grafana --namespace monitoring --values http://bit.ly/2FuFVfV --version 5.0.8 -``` - -## Install Kong - -We will use Kong's Helm chart to install Kong -but you can also use plain manifests for this purpose. - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -$ kubectl create namespace kong -$ helm install mykong kong/kong --namespace kong --values https://bit.ly/2UAv0ZE -``` - -### Enable Prometheus plugin in Kong - -We will enable the Promtheus plugin in Kong at the global level, meaning -each request that flows into the Kubernetes cluster gets tracked in Prometheus: - -```bash -$ echo 'apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: prometheus - annotations: - kubernetes.io/ingress.class: kong - labels: - global: "true" -plugin: prometheus -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/prometheus created -``` - -## Set Up Port Forwards - -Now, we will gain access to the components we just deployed. -In a production environment, you would have a Kubernetes Service with -an external IP or load balancer, which would allow you to access -Prometheus, Grafana, and Kong. -For demo purposes, we will set up port-forwarding using kubectl to get access. -It is not advisable to do this in production. 
- -Open a new terminal and execute the following commands: - -```bash -POD_NAME=$(kubectl get pods --namespace monitoring -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace monitoring port-forward $POD_NAME 9090 & - -# You can access Prometheus in your browser at localhost:9090 - -POD_NAME=$(kubectl get pods --namespace monitoring -l "app.kubernetes.io/instance=grafana" -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace monitoring port-forward $POD_NAME 3000 & - -# You can access Grafana in your browser at localhost:3000 - -POD_NAME=$(kubectl get pods --namespace kong -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace kong port-forward $POD_NAME 8000 & - -# Kong proxy port is now your localhost 8000 port -# We are using plain-text HTTP proxy for this purpose of -# demo. -# You can also use the LoadBalancer IP address and not set up this -# port-forwarding if you are running in a cloud environment. -``` - -## Access Grafana Dashboard - -To access Grafana, you need to get the password for the admin user. - -Execute the following to read the password and take note of it: - -```bash -kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo -``` - -Now, browse to [http://localhost:3000](http://localhost:3000) and -fill in username as “admin” and password as what you just noted above. -You should be logged in to Grafana and Kong’s Grafana Dashboard -should already be installed for you. - -## Setup Services - -We have all the components for monitoring installed, -we will now spin up some services for demo purposes and setup Ingress -routing for them. - -### Install Services - -We will set up three services: billing, invoice, and comments. -Execute the following to spin these services up: - -```bash -kubectl apply -f https://gist.githubusercontent.com/hbagdi/2d8ef66fe22cb99e1514f410f992268d/raw/a03d789b70c46ccd0b99d9f1ed838dc21419fc33/multiple-services.yaml -``` - -### Install Ingress for the Services - -Next, once the services are up and running, we will create Ingress -routing rules in Kubernetes. -This will configure Kong to proxy traffic destined for these services correctly. - -Execute the following: - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: sample-ingresses - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /billing - backend: - serviceName: billing - servicePort: 80 - - path: /comments - backend: - serviceName: comments - servicePort: 80 - - path: /invoice - backend: - serviceName: invoice - servicePort: 80 -' | kubectl apply -f - -``` - -## Let’s Create Some Traffic - -We’re done configuring our services and proxies. -Time to see if our setup works. -Execute the following in a new terminal: - -```bash -while true; -do - curl http://localhost:8000/billing/status/200 - curl http://localhost:8000/billing/status/501 - curl http://localhost:8000/invoice/status/201 - curl http://localhost:8000/invoice/status/404 - curl http://localhost:8000/comments/status/200 - curl http://localhost:8000/comments/status/200 - sleep 0.01 -done -``` - -Since we have already enabled Prometheus plugin in Kong to -collect metrics for requests proxied via Kong, -we should see metrics coming through in the Grafana dashboard. - -You should be able to see metrics related to the traffic flowing -through our services. 
-Try tweaking the above script to send different traffic patterns -and see how the metrics change. -The upstream services are httpbin instances, meaning you can use -a variety of endpoints to shape your traffic. - -## Metrics collected - -### Request Latencies of Various Services - -![Request latencies](/assets/images/products/kubernetes-ingress-controller/request-latencies.png) - -Kong collects latency data of how long your services take to respond to -requests. One can use this data to alert the on-call engineer if the latency -goes beyond a certain threshold. For example, let’s say you have an SLA -that your APIs will respond with latency of less than 20 millisecond -for 95% of the requests. -You could configure Prometheus to alert based on the following query: - -```text -histogram_quantile(0.95, sum(rate(kong_latency_bucket{type="request"}[1m])) by (le,service)) > 20 -``` - -The query calculates the 95th percentile of the the total request -latency (or duration) for all of your services and alerts you if it is more -than 20 milliseconds. -The “type” label in this query is “request”, which tracks the latency -added by Kong and the service. -You can switch this to “upstream” to track latency added by the service only. -Prometheus is highly flexible and well documented, so we won’t go into -details of setting up alerts here, but you’ll be able to find them -in the Prometheus documentation. - -### Kong Proxy Latency - -![Proxy latencies](/assets/images/products/kubernetes-ingress-controller/proxy-latencies.png) - -Kong also collects metrics about its performance. -The following query is similar to the previous one but gives -us insight into latency added by Kong: - -```text -histogram_quantile(0.90, sum(rate(kong_latency_bucket{type="kong"}[1m])) by (le,service)) > 2 -``` - -### Error Rates - -![Error rates](/assets/images/products/kubernetes-ingress-controller/error-rates.png) - -Another important metric to track is the rate of errors and requests -your services are serving. -The time series `kong_http_status` collects HTTP status code metrics -for each service. - -This metric can help you track the rate of errors for each of your service: - -```text -sum(rate(kong_http_status{code=~"5[0-9]{2}"}[1m])) by (service) -``` - -You can also calculate the percentage of requests in any duration -that are errors. Try to come up with a query to derive that result. - -Please note that all HTTP status codes are indexed, meaning you could use -the data to learn about your typical traffic pattern and identify problems. -For example, a sudden rise in 404 response codes could be indicative -of client codes requesting an endpoint that was removed in a recent deploy. - -### Request Rate and Bandwidth - -![Request rates](/assets/images/products/kubernetes-ingress-controller/request-rate.png) - -One can derive the total request rate for each of your services or -across your Kubernetes cluster using the `kong_http_status` time series. - -![Bandwidth](/assets/images/products/kubernetes-ingress-controller/bandwidth.png) - -Another metric that Kong keeps track of is the amount of -network bandwidth (`kong_bandwidth`) being consumed. -This gives you an estimate of how request/response sizes -correlate with other behaviors in your infrastructure. - -You now have metrics for the services running inside your Kubernetes cluster -and have much more visibility into your applications, without making -any modifications in your services. 
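-
-If you want a concrete starting point for the error-percentage exercise in
-the Error Rates section above, one possible query is sketched below. It is
-only a sketch: it reuses the `kong_http_status` series and the `service`
-label from the earlier examples, and the one-minute rate window is an
-assumption you may want to tune to your scrape interval.
-
-```text
-sum(rate(kong_http_status{code=~"5[0-9]{2}"}[1m])) by (service)
-  / sum(rate(kong_http_status[1m])) by (service) * 100
-```
-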
-You can use Alertmanager or Grafana to now configure alerts based on -the metrics observed and your SLOs. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/redis-rate-limiting.md b/app/kubernetes-ingress-controller/1.0.x/guides/redis-rate-limiting.md deleted file mode 100644 index 64f13251f006..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/redis-rate-limiting.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Using Redis for rate-limiting ---- - -Kong can rate-limit your traffic without any external dependency. -In such a case, Kong stores the request counters in-memory -and each Kong node applies the rate-limiting policy independently. -There is no synchronization of information being done in this case. -But if Redis is available in your cluster, Kong -can take advantage of it and synchronize the rate-limit information -across multiple Kong nodes and enforce a slightly different rate-limiting -policy. - -This guide walks through the steps of using Redis for rate-limiting in -a multi-node Kong deployment. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. 
- -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Set up rate-limiting - -We will start by creating a global rate-limiting policy: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit created -``` - -Here we are configuring the {{site.kic_product_name}} to rate-limit traffic from -any client to 5 requests per minute, and we are applying this policy in a -global sense, meaning the rate-limit will apply across all services. - -You can set this up for a specific Ingress or a specific service as well, -please follow [using KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) -guide on steps for doing that. - -Next, test the rate-limiting policy by executing the following command -multiple times and observe the rate-limit headers in the response: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -As there is a single Kong instance running, Kong correctly imposes -the rate-limit and you can make only 5 requests in a minute. - -## Scale the controller to multiple pods - -Now, let's scale up the {{site.kic_product_name}} deployment to 3 pods, for -scalability and redundancy: - -```bash -$ kubectl scale --replicas 3 -n kong deployment ingress-kong -deployment.extensions/ingress-kong scaled -``` - -It will take a couple minutes for the new pods to start up. -Once the new pods are up and running, test the rate-limiting policy by -executing the following command and observing the rate-limit headers: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -You will observe that the rate-limit is not consistent anymore -and you can make more than 5 requests in a minute. - -To understand this behavior, we need to understand how we have configured Kong. -In the current policy, each Kong node is tracking a rate-limit in-memory -and it will allow 5 requests to go through for a client. -There is no synchronization of the rate-limit information across Kong nodes. -In use-cases where rate-limiting is used as a protection mechanism and to -avoid over-loading your services, each Kong node tracking its own counter -for requests is good enough as a malicious user will hit rate-limits on all -nodes eventually. 
-Or if the load-balancer in-front of Kong is performing some -sort of deterministic hashing of requests such that the same Kong node always -receives the requests from a client, then we won't have this problem at all. - -In some cases, a synchronization of information that each Kong node maintains -in-memory is needed. For that purpose, Redis can be used. -Let's go ahead and set this up next. - -## Deploy Redis to your Kubernetes cluster - -First, we will deploy redis in our Kubernetes cluster: - -```bash -$ kubectl apply -n kong -f https://bit.ly/k8s-redis -deployment.apps/redis created -service/redis created -``` - -Once this is deployed, let's update our KongClusterPlugin configuration to use -Redis as a datastore rather than each Kong node storing the counter information -in-memory: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - policy: redis - redis_host: redis -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit configured -``` - -Notice, how the `policy` is now set to `redis` and we have configured Kong -to talk to the `redis` server available at `redis` DNS name, which is the -Redis node we deployed earlier. - -## Test it - -Now, if you go ahead and execute the following commands, you should be able -to make only 5 requests in a minute: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -This guide shows how to use Redis as a data-store for rate-limiting plugin, -but this can be used for other plugins which support Redis as a data-store -like proxy-cache. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/setting-up-custom-plugins.md b/app/kubernetes-ingress-controller/1.0.x/guides/setting-up-custom-plugins.md deleted file mode 100644 index 3c57eee57d16..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/setting-up-custom-plugins.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: Setting up custom plugin in Kubernetes environment ---- - -This guide goes through steps on installing a custom plugin -in Kong without using a Docker build. - -## Prepare a directory with plugin code - -First, we need to create either a ConfigMap or a Secret with -the plugin code inside it. -If you would like to install a plugin which is available as -a rock from Luarocks, then you need to download it, unzip it and create a -ConfigMap from all the Lua files of the plugin. - -We are going to setup a dummy plugin next. -If you already have a real plugin, you can skip this step. - -```shell -$ mkdir myheader && cd myheader -$ echo 'local MyHeader = {} - -MyHeader.PRIORITY = 1000 - -function MyHeader:header_filter(conf) - -- do custom logic here - kong.response.set_header("myheader", conf.header_value) -end - -return MyHeader -' > handler.lua - -$ echo 'return { - name = "myheader", - fields = { - { config = { - type = "record", - fields = { - { header_value = { type = "string", default = "roar", }, }, - }, - }, }, - } -} -' > schema.lua -``` - -Once we have our plugin code available in a directory, -the directory should look something like this: - -```shell -$ tree myheader -myheader -├── handler.lua -└── schema.lua - -0 directories, 2 files -``` - -You might have more files inside the directory as well. - -## Create a ConfigMap or Secret with the plugin code - -Next, we are going to create a ConfigMap or Secret based on the plugin -code. 
- -Please ensure that this is created in the same namespace as the one -in which Kong is going to be installed. - -```shell -# using ConfigMap; replace `myheader` with the name of your plugin -$ kubectl create configmap kong-plugin-myheader --from-file=myheader -n kong -configmap/kong-plugin-myheader created - -# OR using Secret -$ kubectl create secret generic -n kong kong-plugin-myheader --from-file=myheader -secret/kong-plugin-myheader created -``` - -## Modify configuration - -Next, we need to update Kong's Deployment to load our custom plugin. - -Based on your installation method, this step will differ slightly. -The next section explains what changes are necessary. - -### YAML - -The following patch is necessary to load the plugin. -Notable changes: -- The plugin code is mounted into the pod via `volumeMounts` and `volumes` - configuration property. -- `KONG_PLUGINS` environment variable is set to include the custom plugin - along with all the plugins that come in Kong by default. -- `KONG_LUA_PACKAGE_PATH` environment variable directs Kong to look - for plugins in the directory where we are mounting them. - -If you have multiple plugins, simply mount multiple -ConfigMaps and include the plugin name in the `KONG_PLUGINS` -environment variable. - -> Please note that if your plugin code involves database - migration then you need to include the below patch to pod definition of your - migration Job as well. - -Please note that the below is not a complete definition of -the Deployment but merely a strategic patch which can be applied to -an existing Deployment. - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ingress-kong - namespace: kong -spec: - template: - spec: - containers: - - name: proxy - env: - - name: KONG_PLUGINS - value: bundled,myheader - - name: KONG_LUA_PACKAGE_PATH - value: "/opt/?.lua;;" - volumeMounts: - - name: kong-plugin-myheader - mountPath: /opt/kong/plugins/myheader - volumes: - - name: kong-plugin-myheader - configMap: - name: kong-plugin-myheader -``` - -### Helm chart - -With Helm, this is as simple as adding the following values to -your `values.yaml` file: - -```yaml -# values.yaml -plugins: - configMaps: # change this to 'secrets' if you created a secret - - name: kong-plugin-myheader - pluginName: myheader -``` - -The chart automatically configures all the environment variables based on the -plugins you inject. - -Please ensure that you add in other configuration values -you might need for your installation to work. - -### Deploy - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Once, you have all the pieces in place, you are ready -to deploy the {{site.kic_product_name}}: - -```shell -# using YAML or kustomize -kustomize build github.com/hbagdi/yaml/kong/kong-custom-plugin | kubectl apply -f - - -# or helm -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong --values values.yaml - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false --values values.yaml -``` - -Once you have setup Kong with the custom plugin installed, you can use it -like any other plugin. 
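-
-Before wiring the plugin up, you can optionally check that its files were
-mounted where the Deployment patch above expects them. This sketch assumes
-the YAML-based install with the `ingress-kong` Deployment, the `proxy`
-container, and the mount path used earlier; adjust the names if you
-installed with Helm.
-
-```shell
-$ kubectl exec -n kong deploy/ingress-kong -c proxy -- ls /opt/kong/plugins/myheader
-handler.lua
-schema.lua
-```
-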
- -First, create a `KongPlugin` custom resource: - -```yaml -echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: my-custom-plugin -config: - header_value: "my first plugin" -plugin: myheader -" | kubectl apply -f - -``` - -and then can annotate an Ingress or Service resource to instruct -Kong on when to execute the plugin: - -```yaml -konghq.com/plugins: my-custom-plugin -``` - -Once you have got Kong up and running, configure your -custom plugin via [KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/). - -### Plugins in other languages - -When deploying custom plugins in other languages, especially Golang, the built binary is larger than -the size limit of ConfigMap. In such cases, consider using an init container to pull large binaries from -remotes like S3 buckets, or build a custom image that includes plugin runtimes and the plugin itself. - -To read more about building a custom image, see -[use external plugins in container and Kubernetes](/gateway/latest/reference/external-plugins/#use-external-plugins-in-container-and-kubernetes). diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/upstream-mtls.md b/app/kubernetes-ingress-controller/1.0.x/guides/upstream-mtls.md deleted file mode 100644 index 7a6e50c40656..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/upstream-mtls.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Using mTLS with Kong ---- - -This guide walks through on how to setup Kong to perform mutual-TLS -authentication with an upstream service. - -> Please note that this guide walks through mTLS configuration between -Kong and a Service and not Kong and a client or consumer. - -## What is mTLS? - -Mutual authentication refers to two-way authencation, where the client and -server, both can authenticate themselves to the other party. - -With mutual TLS authentication, client and server both present TLS -certificates to the other party (and can prove their identity using their -private key) during the TLS handshake. They can verify the other's -certificate using the their trusted CAs. - -## mTLS with Kong - -Kong 1.3 and above support mutual TLS authentication between Kong and the -upstream service. - -Let's take a look at how one can configure it. - -## Configure Kong to verify upstream server certificate - -Kong, by default, does not verify the certificate presented by the upstream -service. - -To enforce certificate verification, you need to configure the following -environment variables on Kong's container in your deployment: - -``` -KONG_NGINX_PROXY_PROXY_SSL_VERIFY="on" -KONG_NGINX_PROXY_PROXY_SSL_VERIFY_DEPTH="3" -KONG_NGINX_PROXY_PROXY_SSL_TRUSTED_CERTIFICATE="/path/to/ca_certs.pem" -``` - -These basically translate to -[NGINX directives](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) -to configure NGINX to verify certificates. - -Please make sure that the trusted certificates are correctly -mounted into Kong's container and the path to certificate is correctly -reflected in the above environment variable. - -## Configure Kong to present its certificate to the upstream server - -In the above section, we achieved one side of mutual authentication, -where Kong has been configured to verify the identity of the upstream server. - -In this section, we will configure Kong to present its identity to the -upstream server. - -To configure this, you have two options, depending on your use-case. 
-If you would like Kong to present its client certificate to each and every -service that it talks to, you can configure the client certificate -at the global level using Nginx directives. -If you would like to configure a different certificate for -each service that Kong talks to or want to configure Kong to present a -client certificate only to a subset of all services that it is configured to -communicate with, then you can configure that using an annotation on -the Kubernetes Service resource. - -### Global Nginx directive - -You need to configure two Nginx directives for this purpose: -- [`proxy_ssl_certificate`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate) -- [`proxy_ssl_certificate_key`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_key) - -You can mount the certificate and key pair using secrets into the Kong pod -and then set the following two environment variables to set the above two -directives: - -``` -KONG_NGINX_PROXY_PROXY_SSL_CERTIFICATE="/path/to/client_cert.pem" -KONG_NGINX_PROXY_PROXY_SSL_CERTIFICATE_KEY="/path/to/key.pem" -``` - -Once configured, Kong will present its client certificate to every upstream -server that it talks to. - -### Per service annotation - -To configure a different client certificate for each service or only for a -subset of services, you can do so using the -[`konghq.com/client-cert`](/kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcom/client-cert) -annotation. - -To use the annotation, you first need to create a TLS secret with the -client certificate and key in Kubernetes. -The secret should be created in the same namespace as your Kubernetes -Service to which Kong should authenticate itself. - -Once the secret is in place, add the follow annotation on the service: - -``` -konghq.com/client-cert: -``` - -Kong will then use the TLS key-pair to authenticate itself against that service. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-consumer-credential-resource.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-consumer-credential-resource.md deleted file mode 100644 index 8c1acc30e7f8..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-consumer-credential-resource.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Provisioning Consumers and Credentials ---- - -This guide walks through how to use the KongConsumer custom -resource and use Secret resources to associate credentials with those -consumers. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Add authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. - -Let's add a KongPlugin resource to protect the API: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: httpbin-auth -plugin: key-auth -" | kubectl apply -f - -kongplugin.configuration.konghq.com/httpbin-auth created -``` - -Now, associate this plugin with the previous Ingress rule we created -using the `konghq.com/plugins` annotation: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - konghq.com/plugins: httpbin-auth - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Any request matching the proxying rules defined in the `demo` ingress will -now require a valid API key: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 19:30:33 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -Server: kong/1.2.1 - -{"message":"No API key found in request"} -``` - -As you can see above, Kong returns back a `401 Unauthorized` because -we didn't provide an API key. - -## Provision a Consumer - -Let's create a KongConsumer resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, let's provision an API-key associated with -this consumer so that we can pass the authentication imposed by Kong: - -Next, we will create a [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) -resource with an API-key inside it: - -```bash -$ kubectl create secret generic harry-apikey \ - --from-literal=kongCredType=key-auth \ - --from-literal=key=my-sooper-secret-key -secret/harry-apikey created -``` - -The type of credential is specified via `kongCredType`. -You can create the Secret using any other method as well. - -Since we are using the Secret resource, -Kubernetes will encrypt and store this API-key for us. 
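-
-If you prefer a declarative manifest over `kubectl create secret`, an
-equivalent Secret would look roughly like the sketch below. It reuses the
-same name, credential type, and key as the imperative command above, so use
-one approach or the other, not both.
-
-```bash
-$ echo '
-apiVersion: v1
-kind: Secret
-metadata:
-  name: harry-apikey
-type: Opaque
-stringData:
-  kongCredType: key-auth
-  key: my-sooper-secret-key
-' | kubectl apply -f -
-```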
- -Next, we will associate this API-key with the consumer we created previously. - -Please note that we are not re-creating the KongConsumer resource but -only updating it to add the `credentials` array: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry -credentials: -- harry-apikey" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry configured -``` - -## Use the credential - -Now, use the credential to pass authentication: - -```bash -$ curl -i -H 'apikey: my-sooper-secret-key' $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:34:44 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -In this guide, we learned how to leverage an authentication plugin in Kong -and provision credentials. This enables you to offload authentication into -your Ingress layer and keeps the application logic simple. - -All other authentication plugins bundled with Kong work in this -way and can be used to quickly add an authentication layer on top of -your microservices. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-external-service.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-external-service.md deleted file mode 100644 index 3e47325ff76c..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-external-service.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Expose an external application ---- - -This example shows how we can expose a service located outside the Kubernetes cluster using an Ingress. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Create a Kubernetes service - -First we need to create a Kubernetes Service [type=ExternalName][0] using the hostname of the application we want to expose. 
- -```bash -echo " -kind: Service -apiVersion: v1 -metadata: - name: proxy-to-httpbin -spec: - ports: - - protocol: TCP - port: 80 - type: ExternalName - externalName: httpbin.org -" | kubectl create -f - -``` - -## Create an Ingress to expose the service at the path `/foo` - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: proxy-from-k8s-to-httpbin - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: proxy-to-httpbin - servicePort: 80 -' | kubectl create -f - -``` - -## Test the service - -```bash -$ curl -i $PROXY_IP/foo -``` - -[0]: https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-ingress-with-grpc.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-ingress-with-grpc.md deleted file mode 100644 index 64995357181b..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-ingress-with-grpc.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Using Ingress with gRPC ---- - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Pre-requisite - -To make `gRPC` requests, you need a client which can invoke gRPC requests. -In this guide, we use -[`grpcurl`](https://github.com/fullstorydev/grpcurl#installation). -Please ensure that you have that installed in on your local system. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -#### Running GRPC - -1. Add a grpc deployment and service - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/sample-apps/grpc.yaml -service/grpcbin created -deployment.apps/grpcbin created -``` -1. Create a demo grpc ingress rule: - -```bash -$ echo "apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: / - backend: - serviceName: grpcbin - servicePort: 9001" | kubectl apply -f - -ingress.extensions/demo created -``` -1. Next, we need to update the Ingress rule to specify gRPC as the protocol. -By default, all routes are assumed to be either HTTP or HTTPS. This annotation -informs Kong that this route is a gRPC(s) route and not a plain HTTP route: - -``` -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"grpc,grpcs"}}}' -``` - -1. Next, we also update the upstream protocol to be `grpcs`. -Similar to routes, Kong assumes that services are HTTP-based by default. 
-With this annotation, we configure Kong to use gRPCs protocol when it -talks to the upstream service: - -``` -$ kubectl patch svc grpcbin -p '{"metadata":{"annotations":{"konghq.com/protocol":"grpcs"}}}' -``` - -1. You should be able to run a request over `gRPC`: - -``` -$ grpcurl -v -d '{"greeting": "Kong Hello world!"}' -insecure $PROXY_IP:443 hello.HelloService.SayHello -``` diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-kong-with-knative.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-kong-with-knative.md deleted file mode 100644 index 7fe55abf04fc..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-kong-with-knative.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: Using Kong with Knative ---- - -The {{site.kic_product_name}} supports managing ingress traffic for -serverless workloads managed via Knative. - -In this guide, we will learn how to use Kong with Knative services and -configure plugins for Knative services. - - -## Pre-requisite - -This guide will be easier to follow if you have access to a Kubernetes -cluster that is running in the cloud rather than Minikube or any other -local environment. The guide requires access to DNS and a public IP -address or DNS name will certainly keep things simpler and easy for you. - -## Install Knative - -If you don't have knative installed, you need to install Knative: - -``` -kubectl apply --filename https://github.com/knative/serving/releases/download/v0.13.0/serving-crds.yaml -kubectl apply --filename https://github.com/knative/serving/releases/download/v0.13.0/serving-core.yaml -``` - -This will install the resources that are required to run Knative. - -## Install Kong - -Next, install the {{site.kic_product_name}}: - -``` -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -``` - -You can choose to install a different flavor, like using a database, -or using an Enterprise installation instead of Open-Source. You can also -use Helm installation method if that works for you. - -Once Kong is installed, -you should note down the IP address or public CNAME of -`kong-proxy` service. - -In the current case case, - -```shell -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.248.154 35.247.39.83 80:30345/TCP,443:31872/TCP 53m -``` - -Take a note of the above IP address "`35.247.39.83`". This will be different -for your installation. - -## Configure Knative to use Kong for Ingress - -### Ingress class - -Next, we will configure Knative to use `kong` as the Ingress class: - -``` -$ kubectl patch configmap/config-network \ - --namespace knative-serving \ - --type merge \ - --patch '{"data":{"ingress.class":"kong"}}' -``` - -## Setup Knative domain - -As the final step, we need to configure Knative's base domain at which -our services will be accessible. - -We override the default ConfigMap with the DNS name of `${KONG_IP}.xip.io`. -This will be different for you: - -``` -$ echo ' -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-domain - namespace: knative-serving - labels: - serving.knative.dev/release: v0.13.0 -data: - 35.247.39.83.xip.io: "" -' | kubectl apply -f - -configmap/config-domain configured -``` - -Once this is done, the setup is complete and we can move onto using Knative -and Kong. 
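-
-Optionally, you can confirm that both ConfigMaps carry the values set above
-before continuing. This is only a sanity check and assumes the default
-`knative-serving` namespace and the ConfigMap names used in the previous
-steps.
-
-```shell
-$ kubectl get configmap config-network -n knative-serving -o jsonpath='{.data.ingress\.class}'
-kong
-$ kubectl get configmap config-domain -n knative-serving -o yaml
-```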
- -## Test connectivity to Kong - -Send a request to the above domain that we have configured: - -```bash -curl -i http://35.247.39.83.xip.io/ -HTTP/1.1 404 Not Found -Date: Wed, 11 Mar 2020 00:18:49 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -X-Kong-Response-Latency: 1 -Server: kong/1.4.3 - -{"message":"no Route matched with those values"} -``` - -The 404 response is expected since we have not configured any services -in Knative yet. - -## Install a Knative Service - -Let's install our first Knative service: - -``` -$ echo " -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: helloworld-go - namespace: default -spec: - template: - spec: - containers: - - image: gcr.io/knative-samples/helloworld-go - env: - - name: TARGET - value: Go Sample v1 -" | kubectl apply -f - -``` - -It can take a couple of minutes for everything to get configured but -eventually, you will see the URL of the Service. -Let's make the call to the URL: - -```shell -$ curl -v http://helloworld-go.default..xip.io -HTTP/1.1 200 OK -Content-Type: text/plain; charset=utf-8 -Content-Length: 20 -Connection: keep-alive -Date: Tue, 10 Mar 2020 23:45:14 GMT -X-Kong-Upstream-Latency: 2723 -X-Kong-Proxy-Latency: 0 -Via: kong/1.4.3 - -Hello Go Sample v1! -``` - -The request is served by Knative and from the response HTTP headers, -we can tell that the request was proxied by Kong. - -The first request will also take longer to complete as Knative will spin -up a new Pod to service the request. -We can see how Kong observed this latency and recorded it in the -`X-Kong-Upstream-Latency` header. -If you perform subsequent requests, -they should complete much faster. - -## Plugins for knative services - -Let's now execute a plugin for our new Knative service. - -First, let's create a KongPlugin resource: - -```shell -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: add-response-header -config: - add: - headers: - - "demo: injected-by-kong -plugin: response-transformer -" | kubectl apply -f - -kongplugin.configuration.konghq.com/add-response-header created -``` - -Next, we will update the Knative service created before and add in -annotation in the template: - -```shell -$ echo " -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: helloworld-go - namespace: default -spec: - template: - metadata: - annotations: - konghq.com/plugins: add-response-header - spec: - containers: - - image: gcr.io/knative-samples/helloworld-go - env: - - name: TARGET - value: Go Sample v1 -" | kubectl apply -f - -service.serving.knative.dev/helloworld-go configured -``` - -Please note that the annotation `konghq.com/plugins` is -not added to the Service definition -itself but to the `spec.template.metadata.annotations`. - -Let's make the request again: - -```shell -$ curl -i http://helloworld-go.default.35.247.39.83.xip.io/ -HTTP/1.1 200 OK -Content-Type: text/plain; charset=utf-8 -Content-Length: 20 -Connection: keep-alive -Date: Wed, 11 Mar 2020 00:35:07 GMT -demo: injected-by-kong -X-Kong-Upstream-Latency: 2455 -X-Kong-Proxy-Latency: 1 -Via: kong/1.4.3 - -Hello Go Sample v1! -``` - -As we can see, the response has the `demo` header injected. - -This guide demonstrates the power of using Kong and Knative together. -Checkout other plugins and try them out with multiple Knative services. -The possibilities are endless! 
diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-kongclusterplugin-resource.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-kongclusterplugin-resource.md deleted file mode 100644 index 141f2043f806..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-kongclusterplugin-resource.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -title: Using KongClusterPlugin resource ---- - -In this guide, we will learn how to use KongClusterPlugin resource to configure -plugins in Kong. -The guide will cover configuring a plugin for services across different -namespaces. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Installing sample services - -We will start by installing two services, -an echo service and an httpbin service in their corresponding namespaces. - -```bash -$ kubectl create namespace httpbin -namespace/httpbin created -$ kubectl apply -n httpbin -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -```bash -$ kubectl create namespace echo -namespace/echo created -$ kubectl apply -n echo -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress rules - -Let's expose these services outside the Kubernetes cluster -by defining Ingress rules. 
- -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: httpbin-app - namespace: httpbin - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created - -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: echo-app - namespace: echo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test these endpoints: - -```bash -# access httpbin service -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:38:00 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -# access echo service -$ curl -i $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 -<-- clipped -- > -``` - -## Create KongClusterPlugin resource - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: add-response-header - annotations: - kubernetes.io/ingress.class: kong -config: - add: - headers: - - "demo: injected-by-kong" -plugin: response-transformer -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/add-response-header created -``` - -Note how the resource is created at cluster-level and not in any specific -namespace: - -```bash -$ kubectl get kongclusterplugins -NAME PLUGIN-TYPE AGE -add-response-header response-transformer 4s -``` - -If you send requests to `PROXY_IP` now, you will see that the header is not -injected in the responses. The reason being that we have created a -resource but we have not told Kong when to execute the plugin. - -## Configuring plugins on Ingress resources - -We will associate the `KongClusterPlugin` resource with the two Ingress resources -that we previously created: - -```bash -$ kubectl patch ingress -n httpbin httpbin-app -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/httpbin-app patched - -$ kubectl patch ingress -n echo echo-app -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/echo-app patched -``` - -Here, we are asking the {{site.kic_product_name}} to execute the response-transformer -plugin whenever a request matching any of the above two Ingress rules is -processed. 
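
Before testing, you can optionally read the annotations back to confirm that both patches landed; each Ingress should list `konghq.com/plugins: add-response-header`. This is only a sanity check:

```bash
# Confirm the plugin annotation is present on both Ingress resources
$ kubectl get ingress -n httpbin httpbin-app -o jsonpath='{.metadata.annotations}'
$ kubectl get ingress -n echo echo-app -o jsonpath='{.metadata.annotations}'
```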
- -Let's test it out: - -```bash -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:54:31 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:54:39 GMT -Server: echoserver -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As can be seen in the output, the `demo` header is injected by Kong when -the request matches the Ingress rules defined in our two Ingress rules. - -## Updating plugin configuration - -Now, let's update the plugin configuration to change the header value from -`injected-by-kong` to `injected-by-kong-for-kubernetes`: - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: add-response-header - annotations: - kubernetes.io/ingress.class: kong -config: - add: - headers: - - "demo: injected-by-kong-for-kubernetes" -plugin: response-transformer -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/add-response-header configured -``` - -If you repeat the requests from the last step, you will see Kong -now responds with updated header value. - -This guides demonstrates how plugin configuration can be shared across -services running in different namespaces. -This can prove to be useful if the persona controlling the plugin -configuration is different from service owners that are responsible for the -Service and Ingress resources in Kubernetes. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-kongingress-resource.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-kongingress-resource.md deleted file mode 100644 index de12faa0ea8f..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-kongingress-resource.md +++ /dev/null @@ -1,251 +0,0 @@ ---- -title: Using KongIngress resource ---- - -In this guide, we will learn how to use KongIngress resource to control -proxy behavior. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Install a dummy service - -We will start by installing the echo service. - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress - -Let's expose the echo service outside the Kubernetes cluster -by defining an Ingress. 
- -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test: - -```bash -$ curl -i $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/foo - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/foo -``` - -## Use KongIngress with Ingress resource - -By default, Kong will proxy the entire path to the service. -This can be seen in the real path value in the above response. - -We can configure Kong to strip out the part of the path defined in the -Ingress rule and to only respond to GET requests for this particular rule. - -To modify these behaviours, let's first create a KongIngress resource -defining the new behaviour: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: sample-customization -route: - methods: - - GET - strip_path: true" | kubectl apply -f - -kongingress.configuration.konghq.com/test created -``` - -Now, let's associate this KongIngress resource with our Ingress resource -using the `konghq.com/override` annotation. - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/override":"sample-customization"}}}' -ingress.extensions/demo patched -``` - -Now, Kong will proxy only GET requests on `/foo` path and -strip away `/foo`: - -```bash -$ curl -s $PROXY_IP/foo -X POST -{"message":"no Route matched with those values"} - - -$ curl -s $PROXY_IP/foo/baz - - -Hostname: echo-d778ffcd8-vrrtw - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-vrrtw - pod namespace: default - pod IP: 10.60.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/baz - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/baz -``` - -As you can see, the real path value is `/baz`. - -## Use KongIngress with Service resource - -KongIngress can be used to change load-balancing, health-checking and other -proxy behaviours in Kong. - -Next, we are going to tweak two settings: - -- Configure Kong to hash the requests based on IP address of the client. -- Configure Kong to proxy all the request on `/foo` to `/bar`. - -Let's create a KongIngress resource with these settings: - -```bash -$ echo 'apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-customization -upstream: - hash_on: ip -proxy: - path: /bar/' | kubectl apply -f - -kongingress.configuration.konghq.com/demo-customization created -``` - -Now, let's associate this KongIngress resource to the echo service. 
- -```bash -$ kubectl patch service echo -p '{"metadata":{"annotations":{"konghq.com/override":"demo-customization"}}}' -service/echo patched -``` - -Let's test this now: - -```bash -$ curl $PROXY_IP/foo/baz -Hostname: echo-d778ffcd8-vrrtw - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-vrrtw - pod namespace: default - pod IP: 10.60.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/bar/baz - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/bar/baz - -<-- clipped --> -``` - -Real path received by the upstream service (echo) is now changed to `/bar/baz`. - -Also, now all the requests will be sent to the same upstream pod: - -```bash -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -``` - - -You can experiement with various load balancing and healthchecking settings -that KongIngress resource exposes to suit your specific use case. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-kongplugin-resource.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-kongplugin-resource.md deleted file mode 100644 index 02ce7664b7a4..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-kongplugin-resource.md +++ /dev/null @@ -1,469 +0,0 @@ ---- -title: Using KongPlugin resource ---- - -In this guide, we will learn how to use KongPlugin resource to configure -plugins in Kong to modify requests for a specific request path. -The guide will cover configuring a plugin for a specific service, a set of Ingress rules -and for a specific user of the API. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Installing sample services - -We will start by installing two services, -an echo service and an httpbin service. 
- -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress rules - -Let's expose these services outside the Kubernetes cluster -by defining Ingress rules. - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 - - path: /bar - backend: - serviceName: echo - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test these endpoints: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:38:00 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - - - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 -<-- clipped -- > -``` - -Let's add another Ingress resource which proxies requests to `/baz` to httpbin -service: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-2 - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /baz - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo-2 created -``` - -We will use this path later. - -## Configuring plugins on Ingress resource - -Next, we will configure two plugins on the Ingress resource. - -First, we will create a KongPlugin resource: - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: add-response-header -config: - add: - headers: - - "demo: injected-by-kong" -plugin: response-transformer -' | kubectl apply -f - -kongplugin.configuration.konghq.com/add-response-header created -``` - -Next, we will associate it with our Ingress rules: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/demo patched -``` - -Here, we are asking the {{site.kic_product_name}} to execute the response-transformer -plugin whenever a request matching the Ingress rule is processed. 
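
The `kubectl patch` command above is handy for experimenting. If you keep your manifests in version control, the same association can instead be expressed declaratively by adding the annotation to the Ingress definition itself. This is a sketch of the equivalent manifest for the `demo` Ingress created earlier:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: demo
  annotations:
    konghq.com/strip-path: "true"
    kubernetes.io/ingress.class: kong
    # Same effect as the kubectl patch above
    konghq.com/plugins: add-response-header
spec:
  rules:
  - http:
      paths:
      - path: /foo
        backend:
          serviceName: httpbin
          servicePort: 80
      - path: /bar
        backend:
          serviceName: echo
          servicePort: 80
```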
- -Let's test it out: - -```bash -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:54:31 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:54:39 GMT -Server: echoserver -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As can be seen in the output, the `demo` header is injected by Kong when -the request matches the Ingress rules defined in the `demo` Ingress resource. - -If we send a request to `/baz`, then we can see that the header is not injected -by Kong: - -```bash -$ curl -I $PROXY_IP/baz -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:56:20 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Here, we have successfully setup a plugin which is executed only when a -request matches a specific `Ingress` rule. - -## Configuring plugins on Service resource - -Next, we will see how we can configure Kong to execute plugins for requests -which are sent to a specific service. - -Let's add a `KongPlugin` resource for authentication on the httpbin service: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: httpbin-auth -plugin: key-auth -" | kubectl apply -f - - -kongplugin.configuration.konghq.com/httpbin-auth created -``` - -Next, we will associate this plugin to the httpbin service running in our -cluster: - -```bash -$ kubectl patch service httpbin -p '{"metadata":{"annotations":{"konghq.com/plugins":"httpbin-auth"}}}' -service/httpbin patched -``` - -Now, any request sent to the service will require authentication, -no matter which `Ingress` rule it matched: - -```bash -$ curl -I $PROXY_IP/baz -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 22:09:04 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -Server: kong/1.2.1 - -$ curl -I $PROXY_IP/foo -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 22:12:13 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -demo: injected-by-kong -Server: kong/1.2.1 -``` - -You can also see how the `demo` header was injected as the request also -matched one of the rules defined in the `demo` `Ingress` resource. - -## Configure consumer and credential - -Follow the [Using Consumers and Credentials](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource/) -guide to provision a user and an apikey. 
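
For reference, the consumer and credential used in the remainder of this guide look roughly like the following. Treat this as a sketch of the end state rather than a replacement for that guide; the names and API key shown here match the values used in the requests below:

```yaml
# Sketch: a key-auth credential stored as a Secret, referenced by a KongConsumer
apiVersion: v1
kind: Secret
metadata:
  name: harry-apikey
type: Opaque
stringData:
  kongCredType: key-auth
  key: my-sooper-secret-key
---
apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: harry
  annotations:
    kubernetes.io/ingress.class: kong
username: harry
credentials:
- harry-apikey
```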
-Once you have it, please continue: - -Use the API key to pass authentication: - -```bash -$ curl -I $PROXY_IP/baz -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:16:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:15:34 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 -``` - -## Configure a global plugin - -Now, we will protect our Kubernetes cluster. -For this, we will be configuring a rate-limiting plugin, which -will throttle requests coming from the same client. - -This must be a cluster-level `KongClusterPlugin` resource, as `KongPlugin` -resources cannot be applied globally, to preserve Kubernetes RBAC guarantees -for cross-namespace isolation. - -Let's create the `KongClusterPlugin` resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - limit_by: consumer - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit created -``` - -With this plugin (please note the `global` label), every request through -the {{site.kic_product_name}} will be rate-limited: - -```bash -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:34:10 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 22:34:14 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Configure a plugin for a specific consumer - -Now, let's say we would like to give a specific consumer a higher rate-limit. - -For this, we can create a `KongPlugin` resource and then associate it with -a specific consumer. 
- -First, create the `KongPlugin` resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: harry-rate-limit -config: - minute: 10 - limit_by: consumer - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongplugin.configuration.konghq.com/harry-rate-limit created -``` - -Next, associate this with the consumer: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong - konghq.com/plugins: harry-rate-limit -username: harry -credentials: -- harry-apikey" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry configured -``` - -Note the annotation being added to the `KongConsumer` resource. - -Now, if the request is made as the `harry` consumer, the client -will be rate-limited differently: - -```bash -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:34:10 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-RateLimit-Limit-minute: 10 -X-RateLimit-Remaining-minute: 9 -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -# a regular unauthenticated request -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 22:34:14 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -This guide demonstrates how you can use the {{site.kic_product_name}} to -impose restrictions and transformations -on various levels using Kubernetes style APIs. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-mtls-auth-plugin.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-mtls-auth-plugin.md deleted file mode 100644 index ab361aeda05c..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-mtls-auth-plugin.md +++ /dev/null @@ -1,320 +0,0 @@ ---- -title: Using mtls-auth plugin ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -verify client certificates using CA certificates and -[mtls-auth](https://docs.konghq.com/hub/kong-inc/mtls-auth/) plugin -for HTTPS requests. - -> Note: You need an Enterprise license to use this feature. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -Kong for Kubernetes Enterprise on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise) to configure -this environment variable. - -If everything is set up correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
- -## Provision a CA certificate in Kong - -CA certificates in Kong are provisioned by create a `Secret` resource in -Kubernetes. - -The secret resource must have a few properties: -- It must have the `konghq.com/ca-cert: "true"` label. -- It must have a `cert` data property which contains a valid CA certificate - in PEM format. -- It must have an `id` data property which contains a random UUID. -- It must have a `kubernetes.io/ingress.class` annotation whose value matches - the value of the controller's `--ingress-class` argument. By default, that - value is "kong". - -Note that a self-signed CA certificate is being used for the purpose of this -guide. You should use your own CA certificate that is backed by -your PKI infrastructure. - -**This example is used to show the YAML format of a `Secret` resource for the CA certificate. DO NOT directly use the certificate here. -You should use your own CA certificate, or generate a self-signed certificate for testing.** To generate self-signed CA certificates, follow these instructions: - -```bash -openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes\ - -subj "/C=US/ST=California/L=San Francisco/O=Kong/OU=Org/CN=www.example.com" -``` - -```bash -$ echo "apiVersion: v1 -kind: Secret -metadata: - name: my-ca-cert - annotations: - kubernetes.io/ingress.class: kong - labels: - konghq.com/ca-cert: 'true' -type: Opaque -stringData: - cert: | - -----BEGIN CERTIFICATE----- - MIICwTCCAamgAwIBAgIUHGUzUWvHJHrREvIZIcORiFUvze4wDQYJKoZIhvcNAQEL - BQAwEDEOMAwGA1UEAwwFSGVsbG8wHhcNMjAwNTA4MjExODA1WhcNMjAwNjA3MjEx - ODA1WjAQMQ4wDAYDVQQDDAVIZWxsbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC - AQoCggEBANCMMBngjuTvqts8ZXtZhqdr181QH/NmytW1KlyqZd6ppXUer+i0OWhP - 1nAyHsBPJljKAFLd8l1EioPFkN78/wJFDJrHOtfniIQPVLdS2cnNQ72dLyQH6smH - JQDV8ePBQ2GdRP6s61+Da8eoaW6nSLtmEUhxvyteboqwmi2CtUtAfuiU1m5sOdpS - z+L4D08CE+SFIT4MGD3gxNdg7lccWCHIfk54VRSdGDKEVwed8OQvxD0TdpHY+ym5 - nJ4JSkhiS9XIodnxR3AZ6rIPRqk+MQ4LGTjX2EbM0/Yg4qvnZ7m4fcpK2goDZIVL - EF8F+ka1RaAYWTsXI1BAkJbb3kdo/yUCAwEAAaMTMBEwDwYDVR0TBAgwBgEB/wIB - ADANBgkqhkiG9w0BAQsFAAOCAQEAVvB/PeVZpeQ7q2IQQQpADtTd8+22Ma3jNZQD - EkWGZEQLkRws4EJNCCIvkApzpx1GqRcLLL9lbV+iCSiIdlR5W9HtK07VZ318gpsG - aTMNrP9/2XWTBzdHWaeZKmRKB04H4z7V2Dl58D+wxjdqNWsMIHeqqPNKGamk/q8k - YFNqNwisRxMhU6qPOpOj5Swl2jLTuVMAeGWBWmPGU2MUoaJb8sc2Vix9KXcyDZIr - eidkzkqSrjNzI0yJ2gdCDRS4/Rw9iV3B3SRMs0mJMLBDrsowhNfLAd8I3NHzLwps - dZFcvZcT/p717K3hlFVdjGnKIgKcG7aYji/XRR87HKnc+cJMCw== - -----END CERTIFICATE----- - id: cce8c384-721f-4f58-85dd-50834e3e733a" | kubectl create -f - -secret/my-ca-cert created -``` - -Please note the ID, you can use this ID one or use a different one but -the ID is important in the next step when we create the plugin. -Each CA certificate that you create needs a unique ID. -Any random UUID will suffice here and it doesn't have an security -implication. - -You can use [uuidgen](https://linux.die.net/man/1/uuidgen) (Linux, OS X) or -[New-Guid](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/new-guid) -(Windows) to generate an ID. 
- -For example: -```bash -$ uuidgen -907821fc-cd09-4186-afb5-0b06530f2524 -``` - -## Configure mtls-auth plugin - -Next, we are going to create an `mtls-auth` KongPlugin resource which references -CA certificate provisioned in the last step: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: mtls-auth -config: - ca_certificates: - - cce8c384-721f-4f58-85dd-50834e3e733a - skip_consumer_lookup: true - revocation_check_mode: SKIP -plugin: mtls-auth -" | kubectl apply -f - -kongplugin.configuration.konghq.com/mtls-auth created -``` - -## Install a dummy service - -Let's deploy an echo service which we wish to protect -using TLS client certificate authentication. - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -You can deploy a different service or skip this step if you already -have a service deployed in Kubernetes. - -## Set up Ingress - -Let's expose the echo service outside the Kubernetes cluster -by defining an Ingress. - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/plugins: mtls-auth - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -## Test the endpoint - -Now, let's test to see if Kong is asking for client certificate -or not when we make the request: - -``` -$ curl -k https://$PROXY_IP -HTTP/2 401 -date: Mon, 11 May 2020 18:15:05 GMT -content-type: application/json; charset=utf-8 -content-length: 50 -x-kong-response-latency: 0 -server: kong/2.0.4.0-enterprise-k8s - -{"message":"No required TLS certificate was sent"} -``` - -As we can see, Kong is restricting the request because it doesn't -have the necessary authentication information. - -Two things to note here: -- `-k` is used because Kong is set up to serve a self-signed certificate - by default. For full mutual authentication in production use cases, - you must configure Kong to serve a certificate that is signed by a trusted CA. -- For some deployments `$PROXY_IP` might contain a port that points to - `http` port of Kong. In others, it might happen that it contains a DNS name - instead of an IP address. If needed, please update the - command to send an `https` request to the `https` port of Kong or - the load balancer in front of it. - - -## Provisioning credential - -Next, in order to authenticate against Kong, create the client -certificate and private key with the following content: - -{:.important} ->This example is only used to show the format of the client certificate and private key. **DO NOT** use the certificate and private key directly. -You should use a certificate and private key signed by your own CA. 
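
If you generated the self-signed CA from the earlier `openssl` step (`cert.pem` and `key.pem`), one way to mint a throwaway client certificate signed by that CA looks roughly like this; adjust the subject and lifetime to your needs:

```bash
# Create a client key and CSR, then sign the CSR with the CA generated earlier
$ openssl req -new -newkey rsa:4096 -nodes \
    -keyout client.key -out client.csr \
    -subj "/C=US/ST=California/L=San Francisco/O=Kong/OU=Org/CN=example-client"
$ openssl x509 -req -in client.csr \
    -CA cert.pem -CAkey key.pem -CAcreateserial \
    -out client.crt -days 365 -sha256
```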
- -```bash -$ cat client.crt ------BEGIN CERTIFICATE----- -MIIEFTCCAv0CAWUwDQYJKoZIhvcNAQELBQAwEDEOMAwGA1UEAwwFSGVsbG8wHhcN -MjAwNTA4MjE0OTE1WhcNMjEwNTA4MjE0OTE1WjCBkDELMAkGA1UEBhMCQVUxEzAR -BgNVBAgMClNvbWUtU3RhdGUxDTALBgNVBAcMBHNvbWUxETAPBgNVBAoMCHNvbWUg -b3JnMRAwDgYDVQQLDAdvcmd1bml0MRswGQYDVQQDDBJleGFtcGxlLmtvbmdocS5j -b20xGzAZBgkqhkiG9w0BCQEWDGZvb0Bzb21lLmNvbTCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBAM/y80ppzwGYS7zl+A6fx4Xkjwja+ZUK/AoBDazS3TkR -W1tDFZ71koLd60qK2W1d9Wh0/F3iNTcobVefr02mEcLtl+d4zUug+W7RsK/8JSCM -MIDVDYzlTWdd7RJzV1c/0NFZyTRkEVSjGn6eQoC/1aviftiNyfqWtuIDQ5ctSBt8 -2fyvDwu/tBR5VyKu7CLnjZ/ffjNT8WDfbO704XeBBId0+L8i8J7ddYlRhZufdjEw -hKx2Su8PZ9RnJYShTBOpD0xdveh16eb7dpCZiPnp1/MOCyIyo1Iwu570VoMde9SW -sPFLdUMiCXw+A4Gp/e9Am+D/98PiL4JChKsiowbzpDfMrVQH4Sblpcgn/Pp+u1be -2Kl/7wqr3TA+w/unLnBnB859v3wDhSW4hhKASoFwyX3VfJ43AkmWFUBX/bpDvHto -rFw+MvbSLsS3QD5KlZmega1pNZtin5KV8H/oJI/CjEc9HHwd27alW9VkUu0WrH0j -c98wLHB/9xXLjunabxSmd+wv25SgYNqpsRNOLgcJraJbaRh4XkbDyuvjF2bRJVP4 -pIjntxQHS/oDFFFK3wc7fp/rTAl0PJ7tytYj4urg45N3ts7unwnB8WmKzD9Avcwe -8Kst12cEibS8X2sg8wOqgB0yarC17mBEqONK7Fw4VH+VzZYw0KGF5DWjeSXj/XsD -AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEvTMHe27npmyJUBxQeHcNFniMJUWZf0 -i9EGd+XlF+m/l3rh1/mCecV7s32QTZEiFHv4UJPYASbgtx7+mEZuq7dVsxIUICWs -gyRkwvKjMqK2tR5IRkquhK5PuDS0QC3M/ZsDwnTgaezFrplFYf80z1kAAkm/c7eh -ZEjI6+1vuaS+HX1w2unk42PiAEB6oKFi3b8xl4TC6acYfMYiC3cOa/d3ZKHhqXhT -wM0VtDe0Qn1kExe+19XJG5cROelxmMXBm1+/c2KUw1yK8up6kJlEsmd8JLw/wMUp -xcJUKIH1qGBlRlFTYbVell+dB7IkHhadrnw27Z47uHobB/lzN69r63c= ------END CERTIFICATE----- -``` - -```bash -$ cat client.pem ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAz/LzSmnPAZhLvOX4Dp/HheSPCNr5lQr8CgENrNLdORFbW0MV -nvWSgt3rSorZbV31aHT8XeI1NyhtV5+vTaYRwu2X53jNS6D5btGwr/wlIIwwgNUN -jOVNZ13tEnNXVz/Q0VnJNGQRVKMafp5CgL/Vq+J+2I3J+pa24gNDly1IG3zZ/K8P -C7+0FHlXIq7sIueNn99+M1PxYN9s7vThd4EEh3T4vyLwnt11iVGFm592MTCErHZK -7w9n1GclhKFME6kPTF296HXp5vt2kJmI+enX8w4LIjKjUjC7nvRWgx171Jaw8Ut1 -QyIJfD4Dgan970Cb4P/3w+IvgkKEqyKjBvOkN8ytVAfhJuWlyCf8+n67Vt7YqX/v -CqvdMD7D+6cucGcHzn2/fAOFJbiGEoBKgXDJfdV8njcCSZYVQFf9ukO8e2isXD4y -9tIuxLdAPkqVmZ6BrWk1m2KfkpXwf+gkj8KMRz0cfB3btqVb1WRS7RasfSNz3zAs -cH/3FcuO6dpvFKZ37C/blKBg2qmxE04uBwmtoltpGHheRsPK6+MXZtElU/ikiOe3 -FAdL+gMUUUrfBzt+n+tMCXQ8nu3K1iPi6uDjk3e2zu6fCcHxaYrMP0C9zB7wqy3X -ZwSJtLxfayDzA6qAHTJqsLXuYESo40rsXDhUf5XNljDQoYXkNaN5JeP9ewMCAwEA -AQKCAgAt5cC/HuV6w6OL2PJMQAXozo6ndLV7qQYCK0Nabtw3GVahqQffovIoglTJ -iiX9Vqyw1olRK3l1aC3iFjO6Hrpy3MAXbflaBPio9g1aenuzWF3oQZ4RCBdyhi+q -T9zqPAKaAog/UQrmNG3HnqStCCpgGsjGgV0gOx24euHzPyJYNtFiTT0z6acUkcei -txsVhSgkLk8Lgy6WpBnGEDSnjMl0IGQ6w7p6RgUIPv8PXz3WE5BlBGL7qtnO7slA -Id3JxRnEUDh3U3dB7SS5O7oY9v0b/3CDpsuXe3wd1G432E0Zmf0J9Q63t46CZqmd -d+i9YvRE0BpemNDFnmrr3uQ+x43qARtonEELirY99aW0hUUfD7PieLNnZP7tueVB -J80GUU5ckJhn9u6SlKZtvBU2mAWvaKZEv1+9vDh4Le8fNtubpC5YtSKztc66csL6 -DLtyi81iftpF2YtDVKK8UB35KyG/0IWkXyfquOkYuL8RwrJR9tNL1+Zs4GqgC5sH -fVIwR6/+w/kpeH9nP8/0VaXRoqCjKQpVjFg9f645rZQ/OzcnQNv6k8Sr+4zHaHog -uFwOo7p4QfIPIBfU8+8RD36C5U/p5PiouR8sN+rfDCu0N07XKmHAphlqvjTR+OG/ -J5o3jGgAerMZn3gmiGUS+IdmrPw7we8gc8j8E8C6TjvlALQNOQKCAQEA6ySvPyMw -hiqfa9TeYce1gI2HtRyiCM1r7otFmTqS/I53he7b9LAZ5+gqXxMS/PW9QTvKET2q -vRU+xZYD4h/i9R+qZT3s7EsNBXBQHkvh0m0qNRtrsSgAYCWLsI/0nUOKIz6obHu5 -5SxS8y3y1t9SoXvWpzTpAnuk91BVMtSephf/4/hXlH2d1WnOC0SqS979aRrm8NE/ -rdT5qchhySyfZkYbADxy5AHHqoFTtkxGnLvcbY0X/oJI3zNYCFKTFNmb6/61cxuB -REjwREUFOhneXYb9mBG4bxuteCz65MyshiN1EAsVMnI6aEuHR6EAvt1Jslv7Qi1a -2UKM61XcL8m/lQKCAQEA4mTGaoZJ1yz+TCKMuae33Y9assXOYAQpdb3MY2UTgzQg -JAZYmwaAsBaC1e49G0eYVAP+eDI4u0OR0f0CW9Pf+OdGRWuZfVum0d+PmcIhJfgM 
-jXsR4CJpPcX7VZLHMZ77QFDh/xBHNXR8F1latPXFYR3ytcXxl4HEtodDpS84AgiO -57yPitY78MS16l3GJGWlgDdRP/LvVixugH2steHCtk8l932/qayUeezbYSEhyQ6L -13f0qRaBhvRsoULj3HvQWNPxmGYK3p+N+zXc1CErF6x8sDq4jeXyNg+26gZknea8 -3SEKKx+Wf4vT3rlUEeYy0uFubG06qYCdtj2ZuSOKNwKCAQEAgJpQqkRRrp8zD6Od -eHbkIonFVd1yFURcKlvLVdF+QFiesAaCD+IcWQRV4Cerc+PmfP35NtK2RbGP4jp4 -pzxvQUbvR23F3Tnyxi2188vmltKTifYUQRCym+MM8iTZUQV2UG5daO+GLPu/5jYU -IUaEh8MWE97RLUV4ZLZv0lwM5KQtlH3nUFQfdW/ne6wzQ0mS6OAIvF6E6EqZvSzV -plQcXbAr5kYpQ+BhVjRjF0nCOMhZ9yR6ofyZZFFNbUfUH0wghcKJdInveew2U/A3 -up4ZSqegnIHckA/gIODg2y/Bj59mz75v+mYU4aOlOhypLroSK1U5JultTXOjZLZR -tWUuvQKCAQAVcti9hOWABlo9WlSczkAENK2yHD12KU7IQegYTp4vowOchiFk5pPQ -mwFgRUbyy7Cp3QsB1jg7vaYWD/NmQceJbFfjAdOz5bgDUDvppFPBpiOCT/OcmYYA -/T3XmKVYlShWqpMOuDsW3GdZSvTmChbeIZk6EXvXD8tUQ7Jr9vJGdwsa92leDPf2 -0pwtjR7Vme+5GwSOm3SDZIg/kiiHvtDUtuDw9q/u4lPazU7nf90UkFU9X7cFQgWZ -hJS6Hn06CVzu3X2ZI6nJ97Ha5/p4+n97qbLSe226u9tbtddtipeDwjWIebXd6gs3 -IEc9Za+KVpXgFs2AZkTVhELs3h8vRCe3AoIBAQDRr0k5OePCsDbs6RadGI9Ta+pf -I30u8imKw8Rih++127UPjpc8OCzaQNvWnpdAoJTgo12fQJqGigRUfJMFFQn7u3jz -ggAq9WLRsXRZpEXk8NXDr/WhksOoWmkxLf4uNO7l2AytIFqZbb1pmTd0g+np2yBE -8VgDR45IxbGPQLsTzKXeXJuXOi7ut2ehJ+VgsS84BsRTeO4v+Y2qpGcyw6fXtU3E -NDrWe/C5QceILtDcd+JiXUgKrHRK+qrfawoxPBDVhYJ+N/Y7SqvZ2GvxibnRs8YA -cbhEebkfUHRQSEqkPr+ndRHInwWTMAWF4IhSuQOpTvT7PY7UNet2io8W8Py6 ------END RSA PRIVATE KEY----- -``` - -Now, use the key and certificate to authenticate against Kong and use the -service: - -```bash -$ curl --key client.key --cert client.crt https://$PROXY_IP/foo -k -I -HTTP/2 200 -content-type: text/plain; charset=UTF-8 -date: Mon, 11 May 2020 18:27:22 GMT -server: echoserver -x-kong-upstream-latency: 1 -x-kong-proxy-latency: 1 -via: kong/2.0.4.0-enterprise-k8s -``` - -## Conclusion - -This guide demonstrates how to implement client TLS authentication -using Kong. -You are free to use other features that mtls-auth plugin in Kong to -achieve more complicated use-cases. diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-oidc-plugin.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-oidc-plugin.md deleted file mode 100644 index 701241331423..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-oidc-plugin.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Using OIDC plugin ---- - -{{site.ee_product_name}}'s OIDC plugin can authenticate requests using OpenID Connect protocol. -This guide shows a basic example of how to setup the OIDC plugin using -the Ingress Controller. - -> Note: This works only with Enterprise version of Kong. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) documentation -to install enterprise version of the {{site.kic_product_name}}. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: 192.0.2.8.xip.io - http: - paths: - - path: / - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -We are using `192.0.2.8.xip.io` as our host, you can use any domain name -of your choice. A domain name is a prerequisite for this guide. -For demo purpose, we are using [xip.io](http://xip.io) -service to avoid setting up a DNS record. - -Test the Ingress rule: - -```bash -$ curl -i 192.0.2.8.xip.io/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Next, open a browser and browse to `http://192.0.2.8.xip.io`. -You should see landing page same as httpbin.org. - -## Setup OIDC plugin - -Now we are going to protect our dummy service with OpenID Connect -protocol using Google as our identity provider. - -First, set up an OAuth 2.0 application in -[Google](https://developers.google.com/identity/protocols/oauth2/openid-connect). - -Once you have setup your application in Google, use the client ID and client -secret and create a KongPlugin resource in Kubernetes: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: oidc-auth -config: - issuer: https://accounts.google.com/.well-known/openid-configuration - client_id: - - - client_secret: - - - redirect_uri: - - http://192.0.2.8.xip.io -plugin: openid-connect -" | kubectl apply -f - -kongplugin.configuration.konghq.com/global-rate-limit created -``` - -The `redirect_uri` parameter must be a URI that matches the Ingress rule we -created earlier. You must also [add it to your Google OIDC -configuration](https://developers.google.com/identity/protocols/oauth2/openid-connect#setredirecturi) - -Next, enable the plugin on our Ingress: - -```bash -$ kubectl patch ing demo -p '{"metadata":{"annotations":{"konghq.com/plugins":"oidc-auth"}}}' -ingress.extensions/demo patched -``` -## Test - -Now, if you visit the host you have set up in your Ingress resource, -Kong should redirect you to Google to verify your identity. -Once you identify yourself, you should be able to browse our dummy service -once again. - -This basic configuration permits any user with a valid Google account to access -the dummy service. -For setting up more complicated authentication and authorization flows, -please read -[plugin docs](/gateway/latest/configure/auth/oidc-google/). 
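
If the redirect to Google does not happen, inspecting the response headers directly is a quick first step. With the plugin attached you would typically expect a `302` whose `Location` header points at Google's authorization endpoint; the exact URL depends on your OAuth application settings:

```bash
# curl does not follow redirects by default, so the redirect from Kong is visible here
$ curl -si http://192.0.2.8.xip.io/status/200 | grep -iE '^(HTTP|Location)'
```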
diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-rewrites.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-rewrites.md deleted file mode 100644 index cdba8d4bb91f..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-rewrites.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Rewriting hosts and paths ---- -This guide demonstrates host and path rewrites using Ingress and Service configuration. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Create a test Deployment - -To test our requests, we create an echo server Deployment, which responds to -HTTP requests with a summary of the request contents: - -```bash -$ kubectl create namespace echo -namespace/echo created -$ kubectl apply -n echo -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -After completing the examples in the guide, you can clean up the example -configuration with `kubectl delete namespace echo`. - -For your actual production configuration, replace `echo` with whatever -namespace you use to run your application. - -## Create a Kubernetes service - -First, create a Kubernetes Service: - -```bash -echo " -apiVersion: v1 -kind: Service -metadata: - name: echo - namespace: echo -spec: - selector: - app: echo - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 80 -" | kubectl apply -f - -``` - -When referenced by an Ingress, this Service will create a Kong service and -upstream that uses the upstream IPs (Pod IPs) for its `Host` header and appends -request paths starting at `/`. - -## Create an Ingress to expose the service at the path `/myapp` on `example.com` - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: my-app - namespace: echo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: myapp.example.com - http: - paths: - - path: /myapp - backend: - serviceName: echo - servicePort: 80 -' | kubectl create -f - -``` - -This Ingress will create a Kong route attached to the service we created above. -It will preserve its path but honor the service's hostname, so this request: - -```bash -$ curl -svX GET http://myapp.example.com/myapp/foo --resolve myapp.example.com:80:$PROXY_IP -GET /myapp/foo HTTP/1.1 -Host: myapp.example.com -User-Agent: curl/7.70.0 -Accept: */* -``` -will appear upstream as: - -``` -GET /myapp/foo HTTP/1.1 -Host: 10.16.4.8 -User-Agent: curl/7.70.0 -Accept: */* -``` - -We'll use this same cURL command in other examples as well. - -Actual output from cURL and the echo server will be more verbose. These -examples are condensed to focus primarily on the path and Host header. 
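
If you only care about those two pieces of information, you can filter the echo server's output down to them. This is just a convenience, and the exact patterns depend on the echo server's output format:

```bash
# Show only the request path and host details from the echo response
$ curl -s http://myapp.example.com/myapp/foo \
    --resolve myapp.example.com:80:$PROXY_IP | grep -iE 'real path|host'
```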
- -Note that this default behavior uses `strip_path=false` on the route. This -differs from Kong's standard default to conform with expected ingress -controller behavior. - -## Rewriting the host - -There are two options to override the default `Host` header behavior: - -- Add the [`konghq.com/host-header` annotation][1] to your Service, which sets - the `Host` header directly: - ```bash - $ kubectl patch -n echo service echo -p '{"metadata":{"annotations":{"konghq.com/host-header":"internal.myapp.example.com"}}}' - ``` - The request upstream will now use the header from that annotation: - ``` - GET /myapp/foo HTTP/1.1 - Host: internal.myapp.example.com - User-Agent: curl/7.70.0 - Accept: */* - ``` -- Add the [`konghq.com/preserve-host` annotation][0] to your Ingress, which - sends the route/Ingress hostname: - ```bash - $ kubectl patch -n echo ingress my-app -p '{"metadata":{"annotations":{"konghq.com/preserve-host":"true"}}}' - ``` - The request upstream will now include the hostname from the Ingress rule: - ``` - GET /myapp/foo HTTP/1.1 - Host: myapp.example.com - User-Agent: curl/7.70.0 - Accept: */* - ``` - -The `preserve-host` annotation takes precedence, so if you add both annotations -above, the upstream host header will be `myapp.example.com`. - -## Rewriting the path - -There are two options to rewrite the default path handling behavior: - -- Add the [`konghq.com/strip-path` annotation][2] to your Ingress, which strips - the path component of the route/Ingress, leaving the remainder of the path at - the root: - ```bash - $ kubectl patch -n echo ingress my-app -p '{"metadata":{"annotations":{"konghq.com/strip-path":"true"}}}' - ``` - The request upstream will now only contain the path components not in the - Ingress rule: - ``` - GET /foo HTTP/1.1 - Host: 10.16.4.8 - User-Agent: curl/7.70.0 - Accept: */* - ``` -- Add the [`konghq.com/path` annotation][3] to your Service, which prepends - that value to the upstream path: - ```bash - $ kubectl patch -n echo service echo -p '{"metadata":{"annotations":{"konghq.com/path":"/api"}}}' - ``` - The request upstream will now contain a leading `/api`: - ``` - GET /api/myapp/foo HTTP/1.1 - Host: 10.16.4.8 - User-Agent: curl/7.70.0 - Accept: */* - ``` -`strip-path` and `path` can be combined together, with the `path` component -coming first. Adding both annotations above will send requests for `/api/foo`. - -[0]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcompreserve-host -[1]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcomhost-header -[2]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcomstrip-path -[3]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcompath diff --git a/app/kubernetes-ingress-controller/1.0.x/guides/using-tcpingress.md b/app/kubernetes-ingress-controller/1.0.x/guides/using-tcpingress.md deleted file mode 100644 index dd759e971f62..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/guides/using-tcpingress.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: TCPIngress with Kong ---- - -This guide walks through using TCPIngress Custom Resource -resource to expose TCP-based services running in Kubernetes to the out -side world. - -## Overview - -TCP-based Ingress means that Kong simply forwards the TCP stream to a Pod -of a Service that's running inside Kubernetes. Kong will not perform any -sort of transformations. 
- -There are two modes avaialble: -- **Port based routing**: In this mode, Kong simply proxies all traffic it - receives on a specific port to the Kubernetes Service. TCP connections are - load balanced across all the available pods of the Service. -- **SNI based routing**: In this mode, Kong accepts a TLS-encrypted stream - at the specified port and can route traffic to different services based on - the `SNI` present in the TLS handshake. Kong will also terminate the TLS - handshake and forward the TCP stream to the Kubernetes Service. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -> **Note**: This feature works with Kong versions 2.0.4 and above. - -> **Note**: This feature is available in Controller versions 0.8 and above. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Configure Kong for new ports - -First, we will configure Kong's Deployment and Service to expose two new ports -9000 and 9443. Port 9443 expects a TLS connection from the client. - -```shell -$ kubectl patch deploy -n kong ingress-kong --patch '{ - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "proxy", - "env": [ - { - "name": "KONG_STREAM_LISTEN", - "value": "0.0.0.0:9000, 0.0.0.0:9443 ssl" - } - ], - "ports": [ - { - "containerPort": 9000, - "name": "stream9000", - "protocol": "TCP" - }, - { - "containerPort": 9443, - "name": "stream9443", - "protocol": "TCP" - } - ] - } - ] - } - } - } -}' -deployment.extensions/ingress-kong patched -``` - -```shell -$ kubectl patch service -n kong kong-proxy --patch '{ - "spec": { - "ports": [ - { - "name": "stream9000", - "port": 9000, - "protocol": "TCP", - "targetPort": 9000 - }, - { - "name": "stream9443", - "port": 9443, - "protocol": "TCP", - "targetPort": 9443 - } - ] - } -}' -service/kong-proxy patched -``` - -You are free to choose other ports as well. - -## Install TCP echo service - -Next, we will install a dummy TCP service. -If you already have a TCP-based service running in your cluster, -you can use that as well. - -```shell -$ kubectl apply -f https://bit.ly/tcp-echo -deployment.apps/tcp-echo created -service/tcp-echo created -``` - -Now, we have a TCP echo service running in Kubernetes. -We will now expose this on plain-text and a TLS based port. 
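
Before creating any TCPIngress rules, it can be worth confirming that the patched `kong-proxy` Service really exposes the new ports; the output of the following should include `stream9000` and `stream9443` alongside the existing proxy ports:

```bash
# List the name and port of every port exposed by the kong-proxy Service
$ kubectl get service -n kong kong-proxy \
    -o jsonpath='{range .spec.ports[*]}{.name}{"\t"}{.port}{"\n"}{end}'
```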
- -## TCP port based routing - -To expose our service to the outside world, create the following -`TCPIngress` resource: - -```shell -$ echo "apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: echo-plaintext - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - port: 9000 - backend: - serviceName: tcp-echo - servicePort: 2701 -" | kubectl apply -f - -tcpingress.configuration.konghq.com/echo-plaintext created -``` - -Here we are instructing Kong to forward all traffic it receives on port -9000 to `tcp-echo` service on port 2701. - -Once created, we can see the IP address at which this is available: - -```shell -$ kubectl get tcpingress -NAME ADDRESS AGE -echo-plaintext 3m18s -``` - -Lets connect to this service using `telnet`: - -```shell -$ telnet $PROXY_IP 9000 -Trying 35.247.39.83... -Connected to 35.247.39.83. -Escape character is '^]'. -Welcome, you are connected to node gke-harry-k8s-dev-pool-1-e9ebab5e-c4gw. -Running on Pod tcp-echo-844545646c-gvmkd. -In namespace default. -With IP address 10.60.1.17. -This text will be echoed back. -This text will be echoed back. -^] -telnet> Connection closed. -``` - -We can see here that the `tcp-echo` service is now available outside the -Kubernetes cluster via Kong. - -## TLS SNI based routing - -Next, we will demonstrate how Kong can help expose the `tcp-echo` service -in a secure manner to the outside world. - -Create the following TCPIngress resource: - -``` -$ echo "apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: echo-tls - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - port: 9443 - backend: - serviceName: tcp-echo - servicePort: 2701 -" | kubectl apply -f - -tcpingress.configuration.konghq.com/echo-tls created -``` - -Now, we can access the `tcp-echo` service on port 9443, on SNI `example.com`. - -You should setup a DNS record for a Domain that you control -to point to PROXY_IP and then access -the service via that for production usage. - -In our contrived demo example, we can connect to the service via TLS -using `openssl`'s `s_client` command: - -```shell -$ openssl s_client -connect $PROXY_IP:9443 -servername example.com -quiet -openssl s_client -connect 35.247.39.83:9443 -servername foo.com -quiet -depth=0 C = US, ST = California, L = San Francisco, O = Kong, OU = IT Department, CN = localhost -verify error:num=18:self signed certificate -verify return:1 -depth=0 C = US, ST = California, L = San Francisco, O = Kong, OU = IT Department, CN = localhost -verify return:1 -Welcome, you are connected to node gke-harry-k8s-dev-pool-1-e9ebab5e-c4gw. -Running on Pod tcp-echo-844545646c-gvmkd. -In namespace default. -With IP address 10.60.1.17. -This text will be echoed back. -This text will be echoed back. -^C -``` - -Since Kong is not configured with a TLS cert-key pair for `example.com`, Kong -is returning a self-signed default certificate, which is not trusted. -You can also see that the echo service is running as expected. - -## Bonus - -Scale the `tcp-echo` Deployment to have multiple replicas and observe how -Kong load-balances the TCP-connections between pods. - -## Conclusion - -In this guide, we see how to use Kong's TCP routing capabilities using -TCPIngress Custom Resource. This can be very useful if you have services -running inside Kubernetes that have custom protocols instead of the more -popular HTTP or gRPC protocols. 
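
As a quick starting point for the bonus exercise above, scaling the Deployment and opening a few separate telnet sessions is enough to see connections land on different Pods; each session prints the Pod name in its welcome banner:

```bash
# Scale out the echo service, then connect a few times and compare the banners
$ kubectl scale deployment tcp-echo --replicas=3
deployment.apps/tcp-echo scaled

$ telnet $PROXY_IP 9000
```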
diff --git a/app/kubernetes-ingress-controller/1.0.x/index.md b/app/kubernetes-ingress-controller/1.0.x/index.md deleted file mode 100644 index 34bbdc768d68..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Kong Ingress Controller ---- - -## Concepts - -### Architecture - -The [design][design] document explains how the {{site.kic_product_name}} works -inside a Kubernetes cluster and configures Kong to proxy traffic as per -rules defined in the Ingress resources. - -### Custom Resources - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, the `KongIngress` Custom resource is used as an -"extension" to the existing Ingress API. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -Please refer to [custom resources][crd] concept document for more details. - -### Deployment Methods - -The {{site.kic_product_name}} can be deployed in a variety of deployment patterns. -Please refer to the [deployment](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/) documentation, -which explains all the components -involved and different ways of deploying them based on the use-case. - -### High-availability and Scaling - -The {{site.kic_product_name}} is designed to scale with your traffic -and infrastructure. -Please refer to [this document](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/) to understand -failures scenarios, recovery methods, as well as scaling considerations. - -### Ingress classes - -[Ingress classes](/kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes) filter which resources the -controller loads. They ensure that {{site.kic_product_name}} instances do not -load configuration intended for other instances or other ingress controllers. - -### Security - -Please refer to [this document](/kubernetes-ingress-controller/{{page.release}}/concepts/security/) to understand the -default security settings and how to further secure the Ingress Controller. - -## Guides and Tutorials - -Please browse through [guides][guides] to get started or understand how to configure -a specific setting with the {{site.kic_product_name}}. - -## Configuration Reference - -The configurations in the {{site.kic_product_name}} can be tweaked using -Custom Resources and annotations. -Please refer to the following documents detailing this process: - -- [Custom Resource Definitions](/kubernetes-ingress-controller/{{page.release}}/references/custom-resources/) -- [Annotations](/kubernetes-ingress-controller/{{page.release}}/references/annotations/) -- [CLI arguments](/kubernetes-ingress-controller/{{page.release}}/references/cli-arguments/) -- [Version compatibility matrix](/kubernetes-ingress-controller/{{page.release}}/references/version-compatibility/) -- [Plugin compatibility matrix](/kubernetes-ingress-controller/{{page.release}}/references/plugin-compatibility/) - -## FAQs - -[FAQs][faqs] will help find answers to common problems quickly. -Please feel free to open Pull Requests to contribute to the list. - -## Troubleshooting - -Please read through our [deployment guide][deployment] for a detailed -understanding of how Ingress Controller is designed and deployed -along alongside Kong. - -- [FAQs][faqs] might help as well. 
-- [Troubleshooting][troubleshooting] guide can help - resolve some issues. - Please contribute back if you feel your experience can help - the larger community. - -[annotations]: /kubernetes-ingress-controller/{{page.release}}/references/annotations -[crd]: /kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources -[deployment]: /kubernetes-ingress-controller/{{page.release}}/deployment/overview -[design]: /kubernetes-ingress-controller/{{page.release}}/concepts/design -[faqs]: /kubernetes-ingress-controller/{{page.release}}/faq -[troubleshooting]: /kubernetes-ingress-controller/{{page.release}}/troubleshooting -[guides]: /kubernetes-ingress-controller/{{page.release}}/guides/overview diff --git a/app/kubernetes-ingress-controller/1.0.x/references/annotations.md b/app/kubernetes-ingress-controller/1.0.x/references/annotations.md deleted file mode 100644 index d13cf80d7e48..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/references/annotations.md +++ /dev/null @@ -1,428 +0,0 @@ ---- -title: Kong Ingress Controller annotations ---- - -The {{site.kic_product_name}} supports the following annotations on various -resources: - -## Ingress resource - -Following annotations are supported on Ingress resources: - -| Annotation name | Description | -|-----------------|-------------| -| REQUIRED [`kubernetes.io/ingress.class`](#kubernetesioingressclass) | Restrict the Ingress rules that Kong should satisfy | -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for specific Ingress. | -| [`konghq.com/protocols`](#konghqcomprotocols) | Set protocols to handle for each Ingress resource. | -| [`konghq.com/preserve-host`](#konghqcompreserve-host) | Pass the `host` header as is to the upstream service. | -| [`konghq.com/strip-path`](#konghqcomstrip-path) | Strip the path defined in Ingress resource and then forward the request to the upstream service. | -| [`konghq.com/https-redirect-status-code`](#konghqcomhttps-redirect-status-code) | Set the HTTPS redirect status code to use when an HTTP request is received. | -| [`konghq.com/regex-priority`](#konghqcomregex-priority) | Set the route's regex priority. | -| [`konghq.com/methods`](#konghqcommethods) | Set methods matched by this Ingress. | -| [`konghq.com/override`](#konghqcomoverride) | Control other routing attributes via `KongIngress` resource. | - -`kubernetes.io/ingress.class` is normally required, and its value should match -the value of the `--ingress-class` controller argument ("kong" by default). - -Setting the `--process-classless-ingress-v1beta1` controller flag removes that requirement: -when enabled, the controller will process Ingresses with no -`kubernetes.io/ingress.class` annotation. Recommended best practice is to set -the annotation and leave this flag disabled; the flag is intended for -older configurations, as controller versions prior to 0.10 processed classless -Ingress resources by default. 
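As an illustration of how several of these annotations combine on a single resource, following is a minimal sketch; the hostname and the `echo` Service are placeholders:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: demo-annotations
  annotations:
    kubernetes.io/ingress.class: kong             # required by default
    konghq.com/strip-path: "true"                 # strip the matched /api prefix before forwarding
    konghq.com/https-redirect-status-code: "301"  # redirect plain-HTTP requests with a 301
    konghq.com/protocols: "https"                 # only serve this rule over HTTPS
spec:
  rules:
  - host: example.com
    http:
      paths:
      - path: /api
        backend:
          serviceName: echo
          servicePort: 80
```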
- -## Service resource - -Following annotations are supported on Service resources: - -| Annotation name | Description | -|-----------------|-------------| -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific Service | -| [`konghq.com/protocol`](#konghqcomprotocol) | Set protocol Kong should use to talk to a Kubernetes service | -| [`konghq.com/path`](#konghqcompath) | HTTP Path that is always prepended to each request that is forwarded to a Kubernetes service | -| [`konghq.com/client-cert`](#konghqcomclient-cert) | Client certificate and key pair Kong should use to authenticate itself to a specific Kubernetes service | -| [`konghq.com/host-header`](#konghqcomhost-header) | Set the value sent in the `Host` header when proxying requests upstream | -| [`konghq.com/override`](#konghqcomoverride) | Fine grained routing and load-balancing | -| [`ingress.kubernetes.io/service-upstream`](#ingresskubernetesioservice-upstream) | Offload load-balancing to kube-proxy or sidecar | - -## KongConsumer resource - -Following annotations are supported on KongConsumer resources: - -| Annotation name | Description | -|-----------------|-------------| -| REQUIRED [`kubernetes.io/ingress.class`](#kubernetesioingressclass) | Restrict the KongConsumers that a controller should satisfy | -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific consumer | - -`kubernetes.io/ingress.class` is normally required, and its value should match -the value of the `--ingress-class` controller argument ("kong" by default). - -Setting the `--process-classless-kong-consumer` controller flag removes that requirement: -when enabled, the controller will process KongConsumers with no -`kubernetes.io/ingress.class` annotation. Recommended best practice is to set -the annotation and leave this flag disabled; the flag is primarily intended for -older configurations, as controller versions prior to 0.10 processed classless -KongConsumer resources by default. - -## Annotations - -### kubernetes.io/ingress.class - -If you have multiple Ingress controllers in a single cluster, -you can pick one by specifying the `ingress.class` annotation. -Following is an example of -creating an Ingress with an annotation: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: test-1 - annotations: - kubernetes.io/ingress.class: "gce" -spec: - rules: - - host: example.com - http: - paths: - - path: /test1 - backend: - serviceName: echo - servicePort: 80 -``` - -This will target the GCE controller, forcing the {{site.kic_product_name}} to -ignore it. - -On the other hand, an annotation such as - -```yaml -metadata: - name: test-1 - annotations: - kubernetes.io/ingress.class: "kong" -``` - -will target the {{site.kic_product_name}}, forcing the GCE controller -to ignore it. - -The following resources _require_ this annotation by default: - -- Ingress -- KongConsumer -- TCPIngress -- KongClusterPlugin -- Secret resources with the `ca-cert` label - -You can optionally allow Ingress or KongConsumer resources with no class -annotation (by setting the `--process-classless-ingress-v1beta1` or -`--process-classless-kong-consumer` flags, respectively), though recommended -best practice is to leave these flags disabled: the flags are primarily -intended for compatibility with configuration created before this requirement -was introduced in controller 0.10. 
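For example, a KongConsumer that follows the recommended practice of setting the class annotation explicitly might look like the following sketch; the resource name and username are placeholders:

```yaml
apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: example-consumer
  annotations:
    kubernetes.io/ingress.class: kong   # matches the controller's --ingress-class value
username: example-user
```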
- -If you allow classless resources, you must take care when using multiple -controller instances in a single cluster: only one controller instance should -enable these flags to avoid different controller instances fighting over -classless resources, which will result in unexpected and unknown behavior. - -The ingress class used by the {{site.kic_product_name}} to filter Ingress -resources can be changed using the `CONTROLLER_INGRESS_CLASS` -environment variable. - -```yaml -spec: - template: - spec: - containers: - - name: kong-ingress-internal-controller - env: - - name: CONTROLLER_INGRESS_CLASS - value: kong-internal -``` - -#### Multiple unrelated {{site.kic_product_name}}s {#multiple-unrelated-controllers} - -In some deployments, one might use multiple {{site.kic_product_name}}s -in the same Kubernetes cluster -(e.g. one which serves public traffic, one which serves "internal" traffic). -For such deployments, please ensure that in addition to different -`ingress-class`, the `--election-id` is also different. - -In such deployments, `kubernetes.io/ingress.class` annotation can be used on the -following custom resources as well: - -- KongPlugin: To configure (global) plugins only in one of the Kong clusters. -- KongConsumer: To create different consumers in different Kong clusters. -- KongCredential: To create associated credentials for consumers. - -### konghq.com/plugins - -> Available since controller 0.8 - -Kong's power comes from its plugin architecture, where plugins can modify -the request and response or impose certain policies on the requests as they -are proxied to your service. - -With the {{site.kic_product_name}}, plugins can be configured by creating -`KongPlugin` Custom Resources and then associating them with an Ingress, Service, -KongConsumer or a combination of those. - -Following is an example of how to use the annotation: - -```yaml -konghq.com/plugins: high-rate-limit, docs-site-cors -``` - -Here, `high-rate-limit` and `docs-site-cors` -are the names of the KongPlugin resources which -should be to be applied to the Ingress rules defined in the -Ingress resource on which the annotation is being applied. - -This annotation can also be applied to a Service resource in Kubernetes, which -will result in the plugin being executed at Service-level in Kong, -meaning the plugin will be -executed for every request that is proxied, no matter which Route it came from. - -This annotation can also be applied to a KongConsumer resource, -which results in plugin being executed whenever the specific consumer -is accessing any of the defined APIs. - -Finally, this annotation can also be applied on a combination of the -following resources: -- **Ingress and KongConsumer** - If an Ingress resource and a KongConsumer resource share a plugin in the - `konghq.com/plugins` annotation then the plugin will be created for the - combination of those to resources in Kong. -- **Service and KongConsumer** - Same as the above case, if you would like to give a specific consumer or - client of your service some special treatment, you can do so by applying - the same annotation to both of the resources. - -Please follow the -[Using the KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource) -guide for details on how this annotation can be used. - - -### konghq.com/path - -> Available since controller 0.8 - -This annotation can be used on a Service resource only. 
-This annotation can be used to prepend an HTTP path of a request, -before the request is forwarded. - -For example, if the annotation `konghq.com/path: "/baz"` is applied to a -Kubernetes Service `billings`, then any request that is routed to the -`billings` service will be prepended with `/baz` HTTP path. If the -request contains `/foo/something` as the path, then the service will -receive an HTTP request with path set as `/baz/foo/something`. - -### konghq.com/strip-path - -> Available since controller 0.8 - -This annotation can be applied to an Ingress resource and can take two values: -- `"true"`: If set to true, the part of the path specified in the Ingress rule - will be stripped out before the request is sent to the service. - For example, if the Ingress rule has a path of `/foo` and the HTTP request - that matches the Ingress rule has the path `/foo/bar/something`, then - the request sent to the Kubernetes service will have the path - `/bar/something`. -- `"false"`: If set to false, no path manipulation is performed. - -All other values are ignored. -Please note the quotes (`"`) around the boolean value. - -Sample usage: - -```yaml -konghq.com/strip-path: "true" -``` - -### konghq.com/preserve-host - -> Available since controller 0.8 - -This annotation can be applied to an Ingress resource and can take two values: -- `"true"`: If set to true, the `host` header of the request will be sent - as is to the Service in Kubernetes. -- `"false"`: If set to false, the `host` header of the request is not preserved. - -Please note the quotes (`"`) around the boolean value. - -Sample usage: - -```yaml -konghq.com/preserve-host: "true" -``` - -### konghq.com/https-redirect-status-code - -> Available since controller 0.8 - -By default, Kong sends HTTP Status Code 426 for requests -that need to be redirected to HTTPS. -This can be changed using this annotations. -Acceptable values are: -- 301 -- 302 -- 307 -- 308 -- 426 - -Any other value will be ignored. - -Sample usage: - -```yaml -konghq.com/https-redirect-status-code: "301" -``` - -Please note the quotes (`"`) around the integer value. - -### konghq.com/regex-priority - -> Available since controller 0.9 - -Sets the `regex_priority` setting to this value on the Kong route associated -with the Ingress resource. This controls the [matching evaluation -order](/gateway/latest/reference/proxy/#evaluation-order) for regex-based -routes. It accepts any integer value. Routes are evaluated in order of highest -priority to lowest. - -Sample usage: - -```yaml -konghq.com/regex-priority: "10" -``` - -Please note the quotes (`"`) around the integer value. - -### konghq.com/methods - -> Available since controller 0.9 - -Sets the `methods` setting on the Kong route associated with the Ingress -resource. This controls which request methods will match the route. Any -uppercase alpha ASCII string is accepted, though most users will use only -[standard methods](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods). - -Sample usage: - -```yaml -konghq.com/methods: "GET,POST" -``` - -### konghq.com/override - -> Available since controller 0.8 - -This annotation can associate a KongIngress resource with -an Ingress or a Service resource. -It serves as a way to bridge the gap between a sparse Ingress API in Kubernetes -with fine-grained controlled using the properties of Service, Route -and Upstream entities in Kong. 
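Sample usage, assuming a KongIngress resource named `my-kongingress` already exists in the same namespace (the name is a placeholder):

```yaml
konghq.com/override: my-kongingress
```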
- -Please follow the -[Using the KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource) -guide for details on how to use this annotation. - -### konghq.com/protocol - -> Available since controller 0.8 - -This annotation can be set on a Kubernetes Service resource and indicates -the protocol that should be used by Kong to communicate with the service. -In other words, the protocol is used for communication between a -[Kong Service](/gateway/api/admin-ee/latest/#/Services/list-service/) and -a Kubernetes Service, internally in the Kubernetes cluster. - -Accepted values are: -- `http` -- `https` -- `grpc` -- `grpcs` -- `tcp` -- `tls` - -### konghq.com/protocols - -> Available since controller 0.8 - -This annotation sets the list of acceptable protocols for the all the rules -defined in the Ingress resource. -The protocols are used for communication between the -Kong and the external client/user of the Service. - -You usually want to set this annotation for the following two use-cases: -- You want to redirect HTTP traffic to HTTPS, in which case you will use - `konghq.com/protocols: "https"` -- You want to define gRPC routing, in which case you should use - `konghq.com/protocols: "grpc,grpcs"` - -### konghq.com/client-cert - -> Available since controller 0.8 - -This annotation sets the certificate and key-pair Kong should use to -authenticate itself against the upstream service, if the upstream service -is performing mutual-TLS (mTLS) authentication. - -The value of this annotation should be the name of the Kubernetes TLS Secret -resource which contains the TLS cert and key pair. - -Under the hood, the controller creates a Certificate in Kong and then -sets the -[`service.client_certificate`](/gateway/api/admin-ee/latest/#/Services/list-service/) -for the service. - -### konghq.com/host-header - -> Available since controller 0.9 - -Sets the `host_header` setting on the Kong upstream created to represent a -Kubernetes Service. By default, Kong upstreams set `Host` to the hostname or IP -address of an individual target (the Pod IP for controller-managed -configuration). This annotation overrides the default behavior and sends -the annotation value as the `Host` header value. - -If `konghq.com/preserve-host: true` is present on an Ingress (or -`route.preserve_host: true` is present in a linked KongIngress), it will take -precedence over this annotation, and requests to the application will use the -hostname in the Ingress rule. - -Sample usage: - -```yaml -konghq.com/host-header: "test.example.com" -``` - -### ingress.kubernetes.io/service-upstream - -By default, the {{site.kic_product_name}} distributes traffic amongst all the -Pods of a Kubernetes `Service` by forwarding the requests directly to -Pod IP addresses. One can choose the load-balancing strategy to use -by specifying a KongIngress resource. - -However, in some use-cases, the load-balancing should be left up -to `kube-proxy`, or a sidecar component in the case of Service Mesh deployments. - -Setting this annotation to a Service resource in Kubernetes will configure -the {{site.kic_product_name}} to directly forward -the traffic outbound for this Service -to the IP address of the service (usually the ClusterIP). - -`kube-proxy` can then decide how it wants to handle the request and route the -traffic accordingly. If a sidecar intercepts the traffic from the controller, -it can also route traffic as it sees fit in this case. 
- -Following is an example snippet you can use to configure this annotation -on a `Service` resource in Kubernetes, (please note the quotes around `true`): - -```yaml -annotations: - ingress.kubernetes.io/service-upstream: "true" -``` - -You need {{site.kic_product_name}} >= 0.6 for this annotation. diff --git a/app/kubernetes-ingress-controller/1.0.x/references/cli-arguments.md b/app/kubernetes-ingress-controller/1.0.x/references/cli-arguments.md deleted file mode 100644 index 4683b67080ae..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/references/cli-arguments.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: CLI Arguments ---- - -Various settings and configurations of the controller can be tweaked -using CLI flags. - -## Environment variables - -Each flag defined in the table below can also be configured using -an environment variable. The name of the environment variable is `CONTROLLER_` -string followed by the name of flag in uppercase. - -For example, `--ingress-class` can be configured using the following -environment variable: - -``` -CONTROLLER_INGRESS_CLASS=kong-foobar -``` - -It is recommended that all the configuration is done via environment variables -and not CLI flags. - -## Flags - -Following table describes all the flags that are available: - -| Flag | Type | Default | Description | -|-------|------|---------|-------------| -| --admission-webhook-cert-file |`string` | `/admission-webhook/tls.crt` | Path to the PEM-encoded certificate file for TLS handshake.| -| --admission-webhook-key-file |`string` | `/admission-webhook/tls.key` | Path to the PEM-encoded private key file for TLS handshake.| -| --admission-webhook-cert |`string` | none | PEM-encoded certificate string for TLS handshake.| -| --admission-webhook-key |`string` | none | PEM-encoded private key string for TLS handshake.| -| --admission-webhook-listen |`string` | `off` | The address to start admission controller on (ip:port). Setting it to 'off' disables the admission controller.| -| --anonymous-reports |`string` | `true` | Send anonymized usage data to help improve Kong.| -| --apiserver-host |`string` | none | The address of the Kubernetes API server to connect to in the format of protocol://address:port, e.g., "http://localhost:8080. 
If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted.| -| --disable-ingress-extensionsv1beta1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `extensions/v1beta1`.| -| --disable-ingress-networkingv1beta1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `networking/v1beta1`.| -| --disable-ingress-networkingv1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `networking/v1`.| -| --election-id |`string` | `ingress-controller-leader` | The name of ConfigMap (in the same namespace) to use to facilitate leader-election between multiple instances of the controller.| -| --ingress-class |`string` | `kong` | Ingress class name to use to filter Ingress and custom resources when multiple Ingress Controllers are running in the same Kubernetes cluster.| -| --kong-admin-ca-cert-file |`string` | none | Path to PEM-encoded CA certificate file to verify Kong's Admin SSL certificate.| -| --kong-admin-ca-cert |`string` | none | PEM-encoded CA certificate string to verify Kong's Admin SSL certificate.| -| --kong-admin-concurrency |`int` | `10` | Max number of concurrent requests sent to Kong's Admin API.| -| --kong-admin-filter-tag |`string` | `managed-by-ingress-controller` | The tag used to manage entities in Kong.| -| --kong-admin-header |`string` | none | Add a header (key:value) to every Admin API call, this flag can be used multiple times to specify multiple headers.| -| --kong-admin-token |`string` | none | Set the {{site.ee_product_name}} RBAC token to be used by the controller.| -| --kong-admin-tls-server-name |`string` | none | SNI name to use to verify the certificate presented by Kong in TLS.| -| --kong-admin-tls-skip-verify |`boolean` | `false` | Disable verification of TLS certificate of Kong's Admin endpoint.| -| --kong-admin-url |`string` | `http://localhost:8001` | The address of the Kong Admin URL to connect to in the format of `protocol://address:port`.| -| --kong-workspace |`string` | `default` | Workspace in {{site.ee_product_name}} to be configured.| -| --kong-custom-entities-secret |`string` | none | Secret containing custom entities to be populated in DB-less mode, takes the form `namespace/name`.| -| --log-format |`string` | `text` | Format of logs of the controller. Allowed values are `text` and `json`. | -| --log-level |`string` | `info` | Level of logging for the controller. Allowed values are `trace`, `debug`, `info`, `warn`, `error`, `fatal` and `panic`. | -| --enable-reverse-sync |`bool` | `false` | Enable reverse checks from Kong to Kubernetes. Use this option only if a human has edit access to Kong's Admin API. | -| --kubeconfig |`string` | none | Path to kubeconfig file with authorization and master location information.| -| --profiling |`boolean` | `true` | Enable profiling via web interface `host:port/debug/pprof/`. | -| --publish-service |`string` | none | The namespaces and name of the Kubernetes Service fronting the {{site.kic_product_name}} in the form of namespace/name. The controller will set the status of the Ingress resources to match the endpoints of this service. In reference deployments, this is kong/kong-proxy.| -| --publish-status-address |`string` | none | User customized address to be set in the status of ingress resources. 
The controller will set the endpoint records on the ingress using this address.| -| --process-classless-ingress-v1beta1 |`boolean` | `false` | Toggles whether the controller processes `extensions/v1beta1` and `networking/v1beta1` Ingress resources that have no `kubernetes.io/ingress.class` annotation.| -| --process-classless-ingress-v1 |`boolean` | `false` | Toggles whether the controller processes `networking/v1` Ingress resources that have no `kubernetes.io/ingress.class` annotation or class field.| -| --process-classless-kong-consumer |`boolean` | `false` | Toggles whether the controller processes KongConsumer resources that have no `kubernetes.io/ingress.class` annotation.| -| --stderrthreshold |`string` | `2` | logs at or above this threshold go to stderr.| -| --sync-period |`duration` | `10m` | Relist and confirm cloud resources this often.| -| --sync-rate-limit |`float32` | `0.3` | Define the sync frequency upper limit. | -| --update-status |`boolean` | `true` | Indicates if the ingress controller should update the Ingress status IP/hostname.| -| --update-status-on-shutdown |`boolean` | `true` | Indicates if the ingress controller should update the Ingress status IP/hostname when the controller is being stopped.| -| --version |`boolean` | `false` | Shows release information about the {{site.kic_product_name}}.| -| --watch-namespace |`string` | none | Namespace to watch for Ingress and custom resources. The default value of an empty string results in the controller watching for resources in all namespaces and configuring Kong accordingly.| -| --help |`boolean` | `false` | Shows this documentation on the CLI and exit.| diff --git a/app/kubernetes-ingress-controller/1.0.x/references/custom-resources.md b/app/kubernetes-ingress-controller/1.0.x/references/custom-resources.md deleted file mode 100644 index e0f7205e4fea..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/references/custom-resources.md +++ /dev/null @@ -1,419 +0,0 @@ ---- -title: Custom Resource Definitions ---- - -The Ingress Controller can configure Kong specific features -using several [Custom Resource Definitions(CRDs)][k8s-crd]. - -Following CRDs enables users to declaratively configure all aspects of Kong: - -- [**KongPlugin**](#kongplugin): This resource corresponds to - the [Plugin][kong-plugin] entity in Kong. -- [**KongIngress**](#kongingress): This resource provides fine-grained control - over all aspects of proxy behaviour like routing, load-balancing, - and health checking. It serves as an "extension" to the Ingress resources - in Kubernetes. -- [**KongConsumer**](#kongconsumer): - This resource maps to the [Consumer][kong-consumer] entity in Kong. -- [**TCPIngress**](#tcpingress): - This resource can configure TCP-based routing in Kong for non-HTTP - services running inside Kubernetes. -- [**KongCredential (Deprecated)**](#kongcredential-deprecated): - This resource maps to - a credential (key-auth, basic-auth, jwt, hmac-auth) that is associated with - a specific KongConsumer. - -## KongPlugin - -This resource provides an API to configure plugins inside Kong using -Kubernetes-style resources. - -Please see the [concept](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/#KongPlugin) -document for how the resource should be used. 
- -The following snippet shows the properties available in KongPlugin resource: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: - namespace: -disabled: # optionally disable the plugin in Kong -config: # configuration for the plugin - key: value -configFrom: - secretKeyRef: - name: - key: -plugin: # like key-auth, rate-limiting etc -``` - -- `config` contains a list of `key` and `value` - required to configure the plugin. - All configuration values specific to the type of plugin go in here. - Please read the documentation of the plugin being configured to set values - in here. For any plugin in Kong, anything that goes in the `config` JSON - key in the Admin API request, goes into the `config` YAML key in this resource. - Please use a valid JSON to YAML convertor and place the content under the - `config` key in the YAML above. -- `configFrom` contains a reference to a Secret and key, where the key contains - a complete JSON or YAML configuration. This should be used when the plugin - configuration contains sensitive information, such as AWS credentials in the - Lambda plugin or the client secret in the OIDC plugin. Only one of `config` - or `configFrom` may be used in a KongPlugin, not both at once. -- `plugin` field determines the name of the plugin in Kong. - This field was introduced in {{site.kic_product_name}} 0.2.0. - -**Please note:** validation of the configuration fields is left to the user -by default. It is advised to setup and use the admission validating controller -to catch user errors. - -The plugins can be associated with Ingress -or Service object in Kubernetes using `konghq.com/plugins` annotation. - -### Examples - -#### Applying a plugin to a service - -Given the following plugin: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - header_name: my-request-id - echo_downstream: true -plugin: correlation-id -``` - -It can be applied to a service by annotating like: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: myapp-service - labels: - app: myapp-service - annotations: - konghq.com/plugins: request-id -spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: myapp-service - selector: - app: myapp-service -``` - -#### Applying a plugin to an ingress - -The KongPlugin above can be applied to a specific ingress (route or routes): - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - konghq.com/plugins: request-id - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -``` - -A plugin can also be applied to a specific KongConsumer by adding -`konghq.com/plugins` annotation to the KongConsumer resource. - -Please follow the -[Using the KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource) -guide for details on how to use this resource. 
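For the KongConsumer case mentioned above, the same annotation is attached to the KongConsumer's metadata. A minimal sketch, reusing the `request-id` KongPlugin defined earlier; the consumer name and username are placeholders:

```yaml
apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: demo-consumer
  annotations:
    konghq.com/plugins: request-id      # KongPlugin defined in the example above
    kubernetes.io/ingress.class: kong
username: demo-consumer
```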
- -#### Applying a plugin with a secret configuration - -The plugin above can be modified to store its configuration in a secret: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -configFrom: - secretKeyRef: - name: plugin-conf-secret - key: request-id -plugin: correlation-id -``` - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: plugin-conf-secret -stringData: - request-id: | - header_name: my-request-id - echo_downstream: true -type: Opaque -``` - -## KongClusterPlugin - -A `KongClusterPlugin` is same as `KongPlugin` resource. The only differences -are that it is a Kubernetes cluster-level resource instead of a namespaced -resource, and can be applied as a global plugin using labels. - -Please consult the [KongPlugin](#kongplugin) section for details. - -*Example:* - -KongClusterPlugin example: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: request-id - annotations: - kubernetes.io/ingress.class: - labels: - global: "true" # optional, if set, then the plugin will be executed - # for every request that Kong proxies - # please note the quotes around true -config: - header_name: my-request-id -configFrom: - secretKeyRef: - name: - key: - namespace: -plugin: correlation-id -``` - -As with KongPlugin, only one of `config` or `configFrom` can be used. - -Setting the label `global` to `"true"` will apply the plugin globally in Kong, -meaning it will be executed for every request that is proxied via Kong. - -## KongIngress - -Ingress resource spec in Kubernetes can define routing policies -based on HTTP Host header and paths. -While this is sufficient in most cases, -sometimes, users may want more control over routing at the Ingress level. -`KongIngress` serves as an "extension" to Ingress resource. -It is not meant as a replacement to the -`Ingress` resource in Kubernetes. - -Please read the [concept](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/#kongingress) -document for why this resource exists and how it relates to the existing -Ingress resource. - -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and -[Route][kong-route] entities in Kong related to an Ingress resource -can be modified. - -Once a `KongIngress` resource is created, it needs to be associated with -an Ingress or Service resource using the following annotation: - -```yaml -konghq.com/override: kong-ingress-resource-name -``` - -Specifically, - -- To override any properties related to health-checking, load-balancing, - or details specific to a service, add the annotation to the Kubernetes - Service that is being exposed via the Ingress API. -- To override routing configuration (like protocol or method based routing), - add the annotation to the Ingress resource. - -Please follow the -[Using the KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource) -guide for details on how to use this resource. 
- -For reference, the following is a complete spec for KongIngress: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: configuration-demo -upstream: - slots: 10 - hash_on: none - hash_fallback: none - healthchecks: - threshold: 25 - active: - concurrency: 10 - healthy: - http_statuses: - - 200 - - 302 - interval: 0 - successes: 0 - http_path: "/" - timeout: 1 - unhealthy: - http_failures: 0 - http_statuses: - - 429 - interval: 0 - tcp_failures: 0 - timeouts: 0 - passive: - healthy: - http_statuses: - - 200 - successes: 0 - unhealthy: - http_failures: 0 - http_statuses: - - 429 - - 503 - tcp_failures: 0 - timeouts: 0 -proxy: - protocol: http - path: / - connect_timeout: 10000 - retries: 10 - read_timeout: 10000 - write_timeout: 10000 -route: - methods: - - POST - - GET - regex_priority: 0 - strip_path: false - preserve_host: true - protocols: - - http - - https -``` - -## TCPIngress - -The Ingress resource in Kubernetes is HTTP-only. -This custom resource is modeled similar to the Ingress resource but for -TCP and TLS SNI based routing purposes: - -```yaml -apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: - namespace: - annotations: - kubernetes.io/ingress.class: -spec: - rules: - - host: - port: - backend: - serviceName: - servicePort: -``` - -If `host` is not specified, then port-based TCP routing is performed. Kong -doesn't care about the content of TCP stream in this case. - -If `host` is specified, then Kong expects the TCP stream to be TLS-encrypted -and Kong will terminate the TLS session based on the SNI. -Also note that, the port in this case should be configured with `ssl` parameter -in Kong. - -## KongConsumer - -This custom resource configures a consumer in Kong: - -The following snippet shows the field available in the resource: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: - namespace: - annotations: - kubernetes.io/ingress.class: -username: -custom_id: -``` - -An example: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: consumer-team-x - annotations: - kubernetes.io/ingress.class: kong -username: team-X -``` - -When this resource is created, a corresponding consumer entity will be -created in Kong. - -Consumers' `username` and `custom_id` values must be unique across the Kong -cluster. While KongConsumers exist in a specific Kubernetes namespace, -KongConsumers from all namespaces are combined into a single Kong -configuration, and no KongConsumers with the same `kubernetes.io/ingress.class` -may share the same `username` or `custom_id` value. - -## KongCredential (Deprecated) - -This custom resource can be used to configure a consumer specific -entities in Kong. -The resource reference the KongConsumer resource via the `consumerRef` key. - -The validation of the config object is left up to the user. 
- -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongCredential -metadata: - name: credential-team-x -consumerRef: consumer-team-x -type: key-auth -config: - key: 62eb165c070a41d5c1b58d9d3d725ca1 -``` - -The following credential types can be provisioned using the KongCredential -resource: - -- `key-auth` for [Key authentication](/hub/kong-inc/key-auth/) -- `basic-auth` for [Basic authentication](/hub/kong-inc/basic-auth/) -- `hmac-auth` for [HMAC authentication](/hub/kong-inc/hmac-auth/) -- `jwt` for [JWT based authentication](/hub/kong-inc/jwt/) -- `oauth2` for [Oauth2 Client credentials](/hub/kong-inc/oauth2/) -- `acl` for [ACL group associations](/hub/kong-inc/acl/) - -Please ensure that all fields related to the credential in Kong -are present in the definition of KongCredential's `config` section. - -Please refer to the -[using the Kong Consumer and Credential resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource) -guide for details on how to use this resource. - -[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[kong-consumer]: /gateway/api/admin-ee/latest/#/Consumers/list-consumer/ -[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin -[kong-upstream]: /gateway/api/admin-ee/latest/#/Upstreams/list-upstream/ -[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/ -[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/ diff --git a/app/kubernetes-ingress-controller/1.0.x/references/plugin-compatibility.md b/app/kubernetes-ingress-controller/1.0.x/references/plugin-compatibility.md deleted file mode 100644 index 66250f3b167f..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/references/plugin-compatibility.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Plugin Compatibility ---- - -DB-less mode is the preferred choice for controller-managed Kong and Kong -Enterprise clusters. However, not all plugins are available in DB-less mode. -Review the table below to check if a plugin you wish to use requires a -database. - -Note that some DB-less compatible plugins have some limitations or require -non-default configuration for -[compatibility](/gateway/latest/reference/db-less-and-declarative-config/#plugin-compatibility). 
- -## Kong - -| Plugin | Kong | Kong (DB-less) | -|-------------------------|-------------------------------|-------------------------------| -| acl | | | -| aws-lambda | | | -| azure-functions | | | -| basic-auth | | | -| bot-detection | | | -| correlation-id | | | -| cors | | | -| datadog | | | -| file-log | | | -| hmac-auth | | | -| http-log | | | -| ip-restriction | | | -| jwt | | | -| key-auth | | | -| oauth2 | | | -| post-function | | | -| pre-function | | | -| prometheus | | | -| proxy-cache | | | -| rate-limiting | | | -| request-termination | | | -| request-transformer | | | -| response-ratelimiting | | | -| response-transformer | | | -| syslog | | | -| tcp-log | | | -| udp-log | | | -| zipkin | | | - -## {{site.ee_product_name}} - -{% include md/enterprise/k8s-image-note.md %} - -| Plugin | Kong for Kubernetes with {{site.ee_product_name}} | Kong for Kubernetes Enterprise | -|----------------------------------|--------------------------------------------|-------------------------------------------| -| acl | | | -| aws-lambda | | | -| azure-functions | | | -| basic-auth | | | -| bot-detection | | | -| correlation-id | | | -| cors | | | -| datadog | | | -| file-log | | | -| hmac-auth | | | -| http-log | | | -| ip-restriction | | | -| jwt | | | -| key-auth | | | -| oauth2 | | | -| post-function | | | -| pre-function | | | -| prometheus | | | -| proxy-cache | | | -| rate-limiting | | | -| request-termination | | | -| request-transformer | | | -| response-ratelimiting | | | -| response-transformer | | | -| syslog | | | -| tcp-log | | | -| udp-log | | | -| zipkin | | | -| application-registration | | 1 | -| canary release | | | -| collector | | | -| degraphql | | | -| exit-transformer | | | -| forward-proxy | | | -| graphql-proxy-cache-advanced | | | -| graphql-rate-limiting-advanced | | | -| jwt-signer | | | -| kafka-log | | | -| kafka-upstream | | | -| key-auth-enc | | | -| ldap-auth-advanced | | | -| mtls-auth | | | -| oauth2-introspection | | | -| openid-connect | | | -| proxy-cache-advanced | | | -| rate-limiting-advanced | | | -| request-transformer-advanced | | 2 | -| request-validator | | | -| response-transformer-advanced | | | -| route-transformer-advanced | | | -| statsd-advanced | | 3 | -| vault-auth | | | - -1 Only used with Dev Portal - -2 request-transformer now has feature parity with - request-transformer-advanced. request-transformer-advanced remains only for - compatibility with existing configurations. - -3 Only used with Vitals diff --git a/app/kubernetes-ingress-controller/1.0.x/references/version-compatibility.md b/app/kubernetes-ingress-controller/1.0.x/references/version-compatibility.md deleted file mode 100644 index 30f19141c152..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/references/version-compatibility.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Version Compatibility ---- - -Kong's Kubernetes ingress controller is compatible with different flavors of Kong. -The following sections detail on compatibility between versions. - -## Kong - -By Kong, we are here referring to the official distribution of the Open-Source -{{site.base_gateway}}. 
- -| {{site.kic_product_name}} | <= 0.0.4 | 0.0.5 | 0.1.x | 0.2.x | 0.3.x | 0.4.x | 0.5.x | 0.6.x | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | -|:------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kong 0.13.x | | | | | | | | | | | | | | -| Kong 0.14.x | | | | | | | | | | | | | | -| Kong 1.0.x | | | | | | | | | | | | | | -| Kong 1.1.x | | | | | | | | | | | | | | -| Kong 1.2.x | | | | | | | | | | | | | | -| Kong 1.3.x | | | | | | | | | | | | | | -| Kong 1.4.x | | | | | | | | | | | | | | -| Kong 1.5.x | | | | | | | | | | | | | | -| Kong 2.0.x | | | | | | | | | | | | | | -| Kong 2.1.x | | | | | | | | | | | | | | -| Kong 2.2.x | | | | | | | | | | | | | | -| Kong 2.3.x | | | | | | | | | | | | | | - -## Kong-enterprise-k8s - -{% include md/enterprise/k8s-image-note.md %} - -Kong-enterprise-k8s is an official distribution by Kong, Inc. which bundles -all enterprise plugins into Open-Source {{site.base_gateway}}. - -The compatibility for this distribution will largely follow that of the -Open-Source {{site.base_gateway}} compatibility (the previous section). - -| {{site.kic_product_name}} | 0.6.2+ | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | -|:----------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kong-enterprise-k8s 1.3.x.y | | | | | | | -| Kong-enterprise-k8s 1.4.x.y | | | | | | | -| Kong-enterprise-k8s 2.0.x.y | | | | | | | - -## {{site.ee_product_name}} - -{{site.ee_product_name}} is the official enterprise distribution, which includes all -other enterprise functionality, built on top of the Open-Source {{site.base_gateway}}. 
- -| {{site.kic_product_name}} | 0.0.5 | 0.1.x | 0.2.x | 0.3.x | 0.4.x | 0.5.x | 0.6.x | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | -|:------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| {{site.ee_product_name}} 0.32-x | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.33-x | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.34-x | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.35-x | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.36-x | | | | | | | | | | | | | -| {{site.ee_product_name}} 1.3.x | | | | | | | | | | | | | -| {{site.ee_product_name}} 1.5.x | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.1.x | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.2.x | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.3.x | | | | | | | | | | | | | - -## Kubernetes - -| {{site.kic_product_name}} | 0.9.x | 0.10.x | 1.0.x | -|:--------------------------|:---------------------------:|:---------------------------:|:---------------------------:| -| Kubernetes 1.13 | | | | -| Kubernetes 1.14 | | | | -| Kubernetes 1.15 | | | | -| Kubernetes 1.16 | | | | -| Kubernetes 1.17 | | | | -| Kubernetes 1.18 | | | | -| Kubernetes 1.19 | | | | diff --git a/app/kubernetes-ingress-controller/1.0.x/troubleshooting.md b/app/kubernetes-ingress-controller/1.0.x/troubleshooting.md deleted file mode 100644 index 214e983aeaad..000000000000 --- a/app/kubernetes-ingress-controller/1.0.x/troubleshooting.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Debug & Troubleshooting ---- - -## Debug - -Using the flag `--v=XX` it is possible to increase the level of logging. -In particular: - -- `--v=3` shows details about the service, Ingress rule, and endpoint changes - -## Authentication to the Kubernetes API Server - -A number of components are involved in the authentication process and the first step is to narrow -down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file. -Both authentications must work: - -```text -+-------------+ service +------------+ -| | authentication | | -+ apiserver +<-------------------+ ingress | -| | | controller | -+-------------+ +------------+ - -``` - -## Service authentication - -The Ingress controller needs information from API server to configure Kong. -Therefore, authentication is required, which can be achieved in two different ways: - -1. **Service Account**: This is recommended - because nothing has to be configured. - The Ingress controller will use information provided by the system - to communicate with the API server. - See 'Service Account' section for details. -1. **Kubeconfig file**: In some Kubernetes environments - service accounts are not available. - In this case, a manual configuration is required. - The Ingress controller binary can be started with the `--kubeconfig` flag. - The value of the flag is a path to a file specifying how - to connect to the API server. Using the `--kubeconfig` - does not require the flag `--apiserver-host`. - The format of the file is identical to `~/.kube/config` - which is used by `kubectl` to connect to the API server. - See 'kubeconfig' section for details. 
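For the kubeconfig option above, the file is typically mounted into the controller Pod and passed via the `--kubeconfig` flag. The following is an illustrative sketch only, not a reference deployment; the Secret name, mount path, and container name are placeholders:

```yaml
spec:
  template:
    spec:
      containers:
      - name: ingress-controller
        args:
        # point the controller at the mounted kubeconfig instead of
        # relying on the in-cluster service account
        - --kubeconfig=/etc/kubernetes/kubeconfig.yaml
        volumeMounts:
        - name: kubeconfig
          mountPath: /etc/kubernetes
          readOnly: true
      volumes:
      - name: kubeconfig
        secret:
          secretName: controller-kubeconfig   # placeholder Secret containing kubeconfig.yaml
```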
- -## Discovering API-server - -Using this flag `--apiserver-host=http://localhost:8080`, -it is possible to specify an unsecured API server or -reach a remote Kubernetes cluster using -[kubectl proxy](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/#using-kubectl-proxy). -Please do not use this approach in production. - -In the diagram below you can see the full authentication flow with all options, starting with the browser -on the lower left hand side. - -```text - -Kubernetes Workstation -+---------------------------------------------------+ +------------------+ -| | | | -| +-----------+ apiserver +------------+ | | +------------+ | -| | | proxy | | | | | | | -| | apiserver | | ingress | | | | ingress | | -| | | | controller | | | | controller | | -| | | | | | | | | | -| | | | | | | | | | -| | | service account/ | | | | | | | -| | | kubeconfig | | | | | | | -| | +<-------------------+ | | | | | | -| | | | | | | | | | -| +------+----+ kubeconfig +------+-----+ | | +------+-----+ | -| |<--------------------------------------------------------| | -| | | | -+---------------------------------------------------+ +------------------+ -``` - -## Service Account - -If using a service account to connect to the API server, Dashboard expects the file -`/var/run/secrets/kubernetes.io/serviceaccount/token` to be present. It provides a secret -token that is required to authenticate with the API server. - -Verify with the following commands: - -```shell -# start a container that contains curl -$ kubectl run test --image=tutum/curl -- sleep 10000 - -# check that container is running -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -test-701078429-s5kca 1/1 Running 0 16s - -# check if secret exists -$ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ -ca.crt -namespace -token - -# get service IP of master -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kubernetes 10.0.0.1 443/TCP 1d - -# check base connectivity from cluster inside -$ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 -Unauthorized - -# connect using tokens -$ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token) -$ echo $TOKEN_VALUE -eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A -$ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization: Bearer $TOKEN_VALUE" https://10.0.0.1 -{ - "paths": [ - "/api", - "/api/v1", - "/apis", - "/apis/apps", - "/apis/apps/v1alpha1", - "/apis/authentication.k8s.io", - "/apis/authentication.k8s.io/v1beta1", - "/apis/authorization.k8s.io", - "/apis/authorization.k8s.io/v1beta1", - "/apis/autoscaling", - "/apis/autoscaling/v1", - "/apis/batch", - "/apis/batch/v1", - "/apis/batch/v2alpha1", - "/apis/certificates.k8s.io", - "/apis/certificates.k8s.io/v1alpha1", - "/apis/extensions", - "/apis/extensions/v1beta1", - "/apis/policy", - "/apis/policy/v1alpha1", - "/apis/rbac.authorization.k8s.io", - "/apis/rbac.authorization.k8s.io/v1alpha1", - "/apis/storage.k8s.io", - "/apis/storage.k8s.io/v1beta1", - "/healthz", - "/healthz/ping", - "/logs", - "/metrics", - "/swaggerapi/", - "/ui/", - "/version" - ] -} -``` - -If it is not working, there are two possible reasons: - -1. The contents of the tokens are invalid. - Find the secret name with `kubectl get secrets --field-selector=type=kubernetes.io/service-account-token` and - delete it with `kubectl delete secret `. - It will automatically be recreated. -1. 
You have a non-standard Kubernetes installation - and the file containing the token may not be present. - -The API server will mount a volume containing this file, -but only if the API server is configured to use -the ServiceAccount admission controller. -If you experience this error, -verify that your API server is using the ServiceAccount admission controller. -If you are configuring the API server by hand, -you can set this with the `--admission-control` parameter. -Please note that you should use other admission controllers as well. -Before configuring this option, please read about admission controllers. - -More information: - -- [User Guide: Service Accounts](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) -- [Cluster Administrator Guide: Managing Service Accounts](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) - -## Kubeconfig - -If you want to use a kubeconfig file for authentication, -follow the deploy procedure and -add the flag `--kubeconfig=/etc/kubernetes/kubeconfig.yaml` to the deployment diff --git a/app/kubernetes-ingress-controller/1.1.x/concepts/custom-resources.md b/app/kubernetes-ingress-controller/1.1.x/concepts/custom-resources.md deleted file mode 100644 index 838984eec700..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/concepts/custom-resources.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Custom Resources ---- - -[Custom Resources][k8s-crd] in Kubernetes allow controllers -to extend Kubernetes-style -declarative APIs that are specific to certain applications. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -The {{site.kic_product_name}} uses the `configuration.konghq.com` API group -for storing configuration specific to Kong. - -The following CRDs allow users to declaratively configure all aspects of Kong: - -- [**KongIngress**](#kongingress) -- [**KongPlugin**](#kongplugin) -- [**KongClusterPlugin**](#kongclusterplugin) -- [**KongConsumer**](#kongconsumer) -- [**TCPIngress**](#tcpingress) -- [**KongCredential (Deprecated)**](#kongcredential-deprecated) - -## KongIngress - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, `KongIngress` Custom Resource is used as an -"extension" to the existing Ingress API to provide fine-grained control -over proxy behavior. -In other words, `KongIngress` works in conjunction with -the existing Ingress resource and extends it. -It is not meant as a replacement for the `Ingress` resource in Kubernetes. -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and [Route][kong-route] -entities in Kong related to an Ingress resource can be modified. - -Once a `KongIngress` resource is created, you can use the `configuration.konghq.com` -annotation to associate the `KongIngress` resource with an `Ingress` or a `Service` -resource: - -- When the annotation is added to the `Ingress` resource, the routing - configurations are updated, meaning all routes associated with the annotated - `Ingress` are updated to use the values defined in the `KongIngress`'s route - section. 
-- When the annotation is added to a `Service` resource in Kubernetes, - the corresponding `Service` and `Upstream` in Kong are updated to use the - `proxy` and `upstream` blocks as defined in the associated - `KongIngress` resource. - -The below diagram shows how the resources are linked -with one another: - -![Associating Kong Ingress](/assets/images/products/kubernetes-ingress-controller/kong-ingress-association.png "Associating Kong Ingress") - -## KongPlugin - -Kong is designed around an extensible [plugin][kong-plugin] -architecture and comes with a -wide variety of plugins already bundled inside it. -These plugins can be used to modify the request/response or impose restrictions -on the traffic. - -Once this resource is created, the resource needs to be associated with an -`Ingress`, `Service`, or `KongConsumer` resource in Kubernetes. -For more details, please read the reference documentation on `KongPlugin`. - -The below diagram shows how you can link `KongPlugin` resource to an -`Ingress`, `Service`, or `KongConsumer`: - -| | | -:-:|:-: -![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association1.png)|![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association2.png) - -## KongClusterPlugin - -_This resource requires the [`kubernetes.io/ingress.class` annotation](/kubernetes-ingress-controller/{{page.release}}/references/annotations/)._ - -KongClusterPlugin resource is exactly same as KongPlugin, except that it is a -Kubernetes cluster-level resources instead of being a namespaced resource. -This can help when the configuration of the plugin needs to be centralized -and the permissions to add/update plugin configuration rests with a different -persona than application owners. - -This resource can be associated with `Ingress`, `Service` or `KongConsumer` -and can be used in the exact same way as KongPlugin. - -A namespaced KongPlugin resource takes priority over a -KongClusterPlugin with the same name. - -## KongConsumer - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This custom resource configures `Consumers` in Kong. -Every `KongConsumer` resource in Kubernetes directly translates to a -[Consumer][kong-consumer] object in Kong. - -## TCPIngress - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This Custom Resource is used for exposing non-HTTP -and non-GRPC services running inside Kubernetes to -the outside world via Kong. This proves to be useful when -you want to use a single cloud LoadBalancer for all kinds -of traffic into your Kubernetes cluster. - -It is very similar to the Ingress resource that ships with Kubernetes. - -## KongCredential (Deprecated) - -Once a `KongConsumer` resource is created, -credentials associated with the `Consumer` can be provisioned inside Kong -using KongCredential custom resource. - -This Custom Resource has been deprecated and will be removed in a future -release. -Instead, please use secret-based credentials. 
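For reference, a minimal sketch of the secret-based replacement; the names and key value are placeholders, and the full workflow is covered in the consumer and credential guide. The credential fields live in a Secret whose `kongCredType` field names the credential type, and the Secret is listed in the consumer's `credentials` array:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: consumer-team-x-key-auth
stringData:
  kongCredType: key-auth                      # credential type
  key: 62eb165c070a41d5c1b58d9d3d725ca1       # example API key
type: Opaque
---
apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: consumer-team-x
  annotations:
    kubernetes.io/ingress.class: kong
username: team-X
credentials:
- consumer-team-x-key-auth                    # reference the Secret above
```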
- -[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[kong-consumer]: /gateway/api/admin-ee/latest/#/Consumers/list-consumer/ -[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin -[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/ -[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/ -[kong-upstream]: /gateway/api/admin-ee/latest/#/Upstreams/list-upstream/ diff --git a/app/kubernetes-ingress-controller/1.1.x/concepts/deployment.md b/app/kubernetes-ingress-controller/1.1.x/concepts/deployment.md deleted file mode 100644 index 3fee76eeebd9..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/concepts/deployment.md +++ /dev/null @@ -1,302 +0,0 @@ ---- -title: Kong Ingress Controller Deployment ---- - -The {{site.kic_product_name}} is designed to be deployed in a variety of ways -based on uses-cases. This document explains various components involved -and choices one can make as per the specific use-case. - -- [**Kubernetes Resources**](#kubernetes-resources): - Various Kubernetes resources required to run the {{site.kic_product_name}}. -- [**Deployment options**](#deployment-options): - A high-level explanation of choices that one should consider and customize - the deployment to best serve a specific use case. - -## Kubernetes Resources - -The following resources are used to run the {{site.kic_product_name}}: - -- [Namespace](#namespace) -- [Custom resources](#custom-resources) -- [RBAC permissions](#rbac-permissions) -- [Ingress Controller Deployment](#ingress-controller-deployment) -- [Kong Proxy service](#kong-proxy-service) -- [Database deployment and migrations](#database-deployment-and-migration) - -These resources are created if the reference deployment manifests are used -to deploy the {{site.kic_product_name}}. -The resources are explained below for users to gain an understanding of how -they are used, so that they can be tweaked as necessary for a specific use-case. - -### Namespace - -> optional - -The {{site.kic_product_name}} can be deployed in any [namespace][k8s-namespace]. -If {{site.kic_product_name}} is being used to proxy traffic for all namespaces -in a Kubernetes cluster, which is generally the case, -it is recommended that it is installed in a dedicated -`kong` namespace but it is not required to do so. - -The example deployments present in this repository automatically create a `kong` -namespace and deploy resources into that namespace. - -### Custom Resources - -> required - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, custom resources are used as an -"extension" to the existing Ingress API. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -Please refer to the [custom resources](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/) -concept document for details. - -### RBAC permissions - -> required - -The {{site.kic_product_name}} communicates with the Kubernetes API-server and -dynamically configures Kong to automatically load balance across pods -of a service as any service is scaled in our out. - -For this reason, it requires RBAC permissions to access resources stored -in Kubernetes object store. 
- -It needs read permissions (get,list,watch) -on the following Kubernetes resources: - -- Endpoints -- Nodes -- Pods -- Secrets -- Ingress -- KongPlugins -- KongConsumers -- KongCredentials -- KongIngress - -By default, the controller listens for events and above resources across -all namespaces and will need access to these resources at the cluster level -(using `ClusterRole` and `ClusterRoleBinding`). - -In addition to these, it needs: - -- Create a ConfigMap and read and update ConfigMap for to facilitate - leader-election. Please read this [document](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/) - for more details. -- Update permission on the Ingress resource to update the status of - the Ingress resource. - -If the Ingress Controller is listening for events on a single namespace, -these permissions can be updated to restrict these permissions to a specific -namespace using `Role` and `RoleBinding resources`. - -In addition to these, it is necessary to create a `ServiceAccount`, which -has the above permissions. The Ingress Controller Pod then has this -`ServiceAccount` association. This gives the Ingress Controller process -necessary authentication and authorization tokens to communicate with the -Kubernetes API-server. - -### Ingress Controller deployment - -> required - -Kong Ingress deployment consists of the Ingress Controller deployed alongside -Kong. The deployment will be different depending on if a database is being -used or not. - -The deployment(s) is the core which actually runs the {{site.kic_product_name}}. - -See the [database](#database) section below for details. - -### Kong Proxy service - -> required - -Once the {{site.kic_product_name}} is deployed, one service is needed to -expose Kong outside the Kubernetes cluster so that it can receive all traffic -that is destined for the cluster and route it appropriately. -`kong-proxy` is a Kubernetes service which points to the Kong pods which are -capable of proxying request traffic. This service will be usually of type -`LoadBalancer`, however it is not required to be such. -The IP address of this service should be used to configure DNS records -of all the domains that Kong should be proxying, to route the traffic to Kong. - -### Database deployment and migration - -> optional - -The {{site.kic_product_name}} can run with or without a database. -If a database is being deployed, then following resources are required: - -- A `StatefulSet` which runs a PostgreSQL pod backed with a `PersistenceVolume` - to store Kong's configuration. -- An internal `Service` which resolves to the PostgreSQL pod. This ensures - that Kong can find the PostgreSQL instance using DNS inside - the Kubernetes cluster. -- A batch `Job` to run schema migrations. This is required to be executed once - to install bootstrap Kong's database schema. - Please note that on an any upgrade for Kong version, another `Job` will - need to be created if the newer version contains any migrations. - -To figure out if you should be using a database or not, please refer to the -[database](#database) section below. 
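-
-As an illustration of the migrations `Job` mentioned above, a minimal sketch
-might look like the following. The image tag, Postgres host, and credentials
-are assumptions and must match your actual deployment (credentials should come
-from a Secret in practice):
-
-```bash
-$ echo "apiVersion: batch/v1
-kind: Job
-metadata:
-  name: kong-migrations
-  namespace: kong
-spec:
-  template:
-    spec:
-      restartPolicy: OnFailure
-      containers:
-      - name: kong-migrations
-        image: kong:2.2            # assumption: match your Kong version
-        command:
-        - kong
-        - migrations
-        - bootstrap
-        env:
-        - name: KONG_DATABASE
-          value: postgres
-        - name: KONG_PG_HOST
-          value: postgres.kong.svc # assumption: the internal Postgres Service
-        - name: KONG_PG_PASSWORD
-          value: kong              # assumption: use a Secret instead
-" | kubectl apply -f -
-```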
- -## Deployment options - -Following are the difference options to consider while deploying the -{{site.kic_product_name}} for your specific use case: - -- [**Kubernetes Service Type**](#kubernetes-service-types): - Chose between Load Balancer vs Node-Port -- [**Database**](#database): - Backing Kong with a Database or running without a database -- [**Multiple Ingress Controllers**](#multiple-ingress-controllers): - Running multiple {{site.kic_product_name}}s inside the same Kubernetes cluster -- [**Runtime**](#runtime): - Using Kong or {{site.ee_product_name}} (for {{site.ee_product_name}} customers) - -### Kubernetes Service Types - -Once deployed, any Ingress Controller needs to be exposed outside the -Kubernetes cluster to start accepting external traffic. -In Kubernetes, `Service` abstraction is used to expose any application -to the rest of the cluster or outside the cluster. - -If your Kubernetes cluster is running in a cloud environment, where -Load Balancers can be provisioned with relative ease, it is recommended -that you use a Service of type `LoadBalancer` to expose Kong to the outside -world. For the Ingress Controller to function correctly, it is also required -that a L4 (or TCP) Load Balancer is used and not an L7 (HTTP(s)) one. - -If your Kubernetes cluster doesn't support a service of type `LoadBalancer`, -then it is possible to use a service of type `NodePort`. - -### Database - -Until Kong 1.0, a database was required to run Kong. -Kong 1.1 introduced a new mode, DB-less, in which Kong can be configured -using a config file, and removes the need to use a database. - -It is possible to deploy and run the {{site.kic_product_name}} with or without a -database. The choice depends on the specific use-case and results in no -loss of functionality. - -#### Without a database - -In DB-less deployments, Kong's Kubernetes ingress controller runs -alongside Kong and configures Kong and dynamically configures -Kong as per the changes it receives from the Kubernetes API server. - -Following figure shows how this deployment looks like: - -![Kong DB-less](/assets/images/products/kubernetes-ingress-controller/dbless-deployment.png "Kong DB-less architecture") - -In this deployment, only one Deployment is required, which is comprised of -a Pod with two containers, a Kong container which proxies the requests -and a controller container which configures Kong. - -`kong-proxy` service would point to the ports of the Kong container in the -above deployment. - -Since each pod contains a controller and a Kong container, scaling out -simply requires horizontally scaling this deployment to handle more traffic -or to add redundancy in the infrastructure. - -#### With a Database - -In a deployment where Kong is backed by a DB, the deployment architecture -is a little different. - -Please refer to the below figure: - -![Kong with a Database](/assets/images/products/kubernetes-ingress-controller/db-deployment.png "Kong with database") - -In this type of deployment, there are two types of deployments created, -separating the control and data flow: - -- **Control-plane**: This deployment consists of a pod(s) running - the controller alongside - a Kong container, which can only configure the database. This deployment - does not proxy any traffic but only configures Kong. If multiple - replicas of this pod are running, a leader election process will ensure - that only one of the pods is configuring Kong's database at a time. 
-- **Data-plane**: This deployment consists of pods running a
-  single Kong container which can proxy traffic based on the configuration
-  it loads from the database. This deployment should be scaled to respond
-  to changes in traffic profiles and add redundancy to safeguard against node
-  failures.
-- **Database**: The database is used to store Kong's configuration and propagate
-  changes to all the Kong pods in the cluster. All Kong containers in the
-  cluster should be able to connect to this database.
-
-A database-driven deployment should be used if your use-case requires
-dynamic creation of Consumers and/or credentials in Kong at a scale large
-enough that the consumers will not fit entirely in memory.
-
-## Multiple Ingress Controllers
-
-It is possible to run multiple instances of the {{site.kic_product_name}} or
-run a Kong {{site.kic_product_name}} alongside other Ingress Controllers inside
-the same Kubernetes cluster.
-
-There are a few different ways of accomplishing this:
-
-- Using the `kubernetes.io/ingress.class` annotation:
-  It is common to deploy Ingress Controllers on a cluster level, meaning
-  an Ingress Controller will satisfy Ingress rules created in all the namespaces
-  inside a Kubernetes cluster.
-  Use the annotation on Ingress and Custom resources to segment
-  the Ingress resources between multiple Ingress Controllers.
-  **Warning!**
-  Be careful when another Ingress Controller acts as the cluster default
-  (that is, it picks up resources without any `kubernetes.io/ingress.class`
-  annotation) and you rely on the default `kong` ingress class. The default
-  `kong` ingress class has special behavior: any Ingress resource that is not
-  annotated is picked up. Therefore, if you use an ingress class other than
-  `kong`, you must set that ingress class on every Kong CRD object
-  (plugin, consumer) that you use.
-- Namespace-based isolation:
-  {{site.kic_product_name}} supports a deployment option where it will satisfy
-  Ingress resources in a specific namespace. With this model, one can deploy
-  a controller in multiple namespaces and they will run in an isolated manner.
-- If you are using {{site.ee_product_name}}, you can run multiple Ingress Controllers
-  pointing to the same database and configuring different Workspaces inside
-  {{site.ee_product_name}}. With such a deployment, one can use either of the above
-  two approaches to segment Ingress resources into different Workspaces in
-  {{site.ee_product_name}}.
-
-## Runtime
-
-The {{site.kic_product_name}} is compatible with a variety of runtimes:
-
-### {{site.base_gateway}} (OSS)
-
-This is the [Open-Source Gateway](https://github.com/kong/kong) runtime.
-The Ingress Controller is primarily developed against releases of the
-open-source gateway.
-
-### {{site.ee_product_name}} K8S
-
-If you are a {{site.ee_product_name}} customer, you have access to two more runtimes.
-
-The first one, {{site.ee_product_name}} K8S, is a package that takes the Open-Source
-{{site.base_gateway}} and adds enterprise-only plugins to it.
-
-You simply need to deploy {{site.ee_product_name}} K8S instead of the Open-Source
-Gateway in order to take full advantage of the enterprise plugins.
-
-### {{site.ee_product_name}}
-
-The {{site.kic_product_name}} is also compatible with the full version of
-{{site.ee_product_name}}. This runtime ships with Kong Manager, Kong Portal, and a
-number of other enterprise-only features.
-[This doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise) provides a high-level
-overview of the architecture.
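-
-As a sketch of the multiple Ingress Controller setups described above, a second
-{{site.kic_product_name}} instance can be installed with its own ingress class
-using the Helm chart. The release name, namespace, and class value below are
-illustrative, and the `ingressController.ingressClass` setting is assumed to be
-available in the chart version you use:
-
-```bash
-# Install a second, internal-facing Kong with a dedicated ingress class
-$ helm install kong-internal kong/kong \
-    --namespace kong-internal --create-namespace \
-    --set ingressController.ingressClass=kong-internal
-
-# Ingress and Kong CRD resources opting into this instance then carry:
-#   kubernetes.io/ingress.class: "kong-internal"
-```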
- -[k8s-namespace]: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ diff --git a/app/kubernetes-ingress-controller/1.1.x/concepts/design.md b/app/kubernetes-ingress-controller/1.1.x/concepts/design.md deleted file mode 100644 index 62402fec8f0b..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/concepts/design.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Kong Ingress Controller Design ---- - -## Overview - -The {{site.kic_product_name}} configures Kong -using Ingress resources created inside a Kubernetes cluster. - -The {{site.kic_product_name}} is made up of two components: - -- Kong, the core proxy that handles all the traffic -- Controller, a process that syncs the configuration from Kubernetes to Kong - -The {{site.kic_product_name}} performs more than just proxying the traffic coming -into a Kubernetes cluster. It is possible to configure plugins, -load balancing, health checking and leverage all that Kong offers in a -standalone installation. - -The following figure shows how it works: - -![high-level-design](/assets/images/products/kubernetes-ingress-controller/high-level-design.png "High Level Design") - -The Controller listens for changes happening inside the Kubernetes -cluster and updates Kong in response to those changes to correctly -proxy all the traffic. - -Kong is updated dynamically to respond to changes around scaling, -configuration changes, failures that are happening inside a Kubernetes -cluster. - -## Translation - -Kubernetes resources are mapped to Kong resources to correctly -proxy all the traffic. - -The following figure describes the mapping between Kubernetes concepts -to Kong's configuration: - -![translating k8s to kong](/assets/images/products/kubernetes-ingress-controller/k8s-to-kong.png "Translating k8s resources to Kong") - -Let's go through how Kubernetes resources are being mapped to Kong's -configuration: - -- An [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) - resource in Kubernetes defines a set of rules for proxying - traffic. These rules corresponds to the concept of Route in Kong. -- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/) - inside Kubernetes is a way to abstract an application that is - running on a set of pods. - This maps to two objects in Kong: Service and Upstream. - The service object in Kong holds the information on the protocol - to use to talk to the upstream service and various other protocol - specific settings. The Upstream object defines load balancing - and healthchecking behavior. -- Pods associated with a Service in Kubernetes map as a Target belonging - to the Upstream (the upstream corresponding to the Kubernetes - Service) in Kong. Kong load balances across the Pods of your service. - This means that all requests flowing through Kong are not directed via - kube-proxy but directly to the pod. - -For more information on how Kong works with Routes, Services, and Upstreams, -please see the [Proxy](/gateway/latest/reference/proxy/) -and [Load balancing](/gateway/latest/reference/loadbalancing/) references. 
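-
-To make this mapping concrete, the following minimal sketch (names, port, and
-path are illustrative) results in one Kong Service and Upstream for `echo`, one
-Route for the `/echo` rule, and one Target per `echo` Pod:
-
-```bash
-$ echo "apiVersion: v1
-kind: Service
-metadata:
-  name: echo
-spec:
-  selector:
-    app: echo
-  ports:
-  - port: 80
-    targetPort: 8080
----
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
-  name: echo
-  annotations:
-    kubernetes.io/ingress.class: kong
-spec:
-  rules:
-  - http:
-      paths:
-      - path: /echo
-        backend:
-          serviceName: echo
-          servicePort: 80
-" | kubectl apply -f -
-```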
diff --git a/app/kubernetes-ingress-controller/1.1.x/concepts/ha-and-scaling.md b/app/kubernetes-ingress-controller/1.1.x/concepts/ha-and-scaling.md
deleted file mode 100644
index 86b6e710c9f4..000000000000
--- a/app/kubernetes-ingress-controller/1.1.x/concepts/ha-and-scaling.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-title: High-availability and Scaling
----
-
-## High availability
-
-The {{site.kic_product_name}} is designed to be reasonably easy to operate and
-be highly available, meaning that when some expected failures do occur, the
-Controller should be able to continue to function with the minimum possible
-service disruption.
-
-The {{site.kic_product_name}} is composed of two parts: Kong, which handles
-the requests, and the Controller, which configures Kong dynamically.
-
-Kong itself can be deployed in a highly available manner by deploying
-multiple instances (or pods). Kong nodes are stateless, meaning a Kong pod
-can be terminated and restarted at any point in time.
-
-The controller itself can be stateful or stateless, depending on whether a
-database is being used or not.
-
-If a database is not used, then the Controller and Kong are deployed as
-colocated containers in the same pod and each controller configures the Kong
-container that it is running with.
-
-For cases when a database is necessary, the Controllers can be deployed
-on multiple zones to provide redundancy. In such a case, a leader election
-process will elect one instance as a leader, which will manipulate Kong's
-configuration.
-
-### Leader election
-
-The {{site.kic_product_name}} performs a leader-election when multiple
-instances of the controller are running to ensure that only a single Controller
-is actively pushing changes to Kong's database (when running in DB-mode).
-If multiple controllers are making changes to the database, it is possible that
-the controllers step over each other.
-If an instance of the controller fails, any other container which is a follower
-takes up the leadership and then continues syncing Kong's configuration from
-Kubernetes.
-
-For this reason, the Controller needs permission to create a ConfigMap.
-By default, the permission is given at Cluster level but it can be narrowed
-down to a single namespace (using Role and RoleBinding) for a stricter RBAC
-policy.
-
-It also needs permission to read and update this ConfigMap.
-This permission can be specific to the ConfigMap that is being used
-for leader-election purposes.
-The name of the ConfigMap is derived from the value of the election-id CLI flag
-(default: `ingress-controller-leader`) and the
-ingress-class (default: `kong`) as: `<election-id>-<ingress-class>`.
-For example, the default ConfigMap that is used for leader election will
-be "ingress-controller-leader-kong", and it will be present in the same
-namespace that the controller is deployed in.
-
-## Scaling
-
-Kong is designed to be horizontally scalable, meaning as traffic increases,
-multiple instances of Kong can be deployed to handle the increase in load.
-
-The configuration is either pumped into Kong directly via the Ingress
-Controller or loaded via the database. Kong containers can be considered
-stateless as the configuration is either loaded from the database (and
-cached heavily in-memory) or loaded in-memory directly via a config file.
-
-One can use a `HorizontalPodAutoscaler` (HPA) based on metrics
-like CPU utilization, bandwidth being used, or total request count per second
-to dynamically scale the {{site.kic_product_name}} as the traffic profile changes.
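-
-For example, a CPU-based `HorizontalPodAutoscaler` could be a sketch like the
-following; the deployment name `ingress-kong` matches the stock manifests, and
-the replica counts and utilization target are assumptions to tune for your
-environment:
-
-```bash
-$ echo "apiVersion: autoscaling/v2beta2
-kind: HorizontalPodAutoscaler
-metadata:
-  name: kong-hpa
-  namespace: kong
-spec:
-  scaleTargetRef:
-    apiVersion: apps/v1
-    kind: Deployment
-    name: ingress-kong
-  minReplicas: 2
-  maxReplicas: 10
-  metrics:
-  - type: Resource
-    resource:
-      name: cpu
-      target:
-        type: Utilization
-        averageUtilization: 70
-" | kubectl apply -f -
-```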
diff --git a/app/kubernetes-ingress-controller/1.1.x/concepts/ingress-classes.md b/app/kubernetes-ingress-controller/1.1.x/concepts/ingress-classes.md deleted file mode 100644 index 85790f0dbd66..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/concepts/ingress-classes.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Kong Ingress Controller and Ingress Class ---- - -## Introduction - -The {{site.kic_product_name}} uses ingress classes to filter Kubernetes Ingress -objects and other resources before converting them into Kong configuration. -This allows it to coexist with other ingress controllers and/or other -deployments of the {{site.kic_product_name}} in the same cluster: a -{{site.kic_product_name}} will only process configuration marked for its use. - -## Configuring the controller ingress class - -The `--ingress-class` flag (or `CONTROLLER_INGRESS_CLASS` environment variable) -specify the ingress class expected by the {{site.kic_product_name}}. By default, -it expects the `kong` class. - -## Loading resources by class - -The {{site.kic_product_name}} translates a variety of Kubernetes resources into -Kong configuration. Broadly speaking, we can separate these resources into two -categories: - -- Resources that the controller translates directly into Kong configuration. -- Resources referenced by some other resource, where the other resource is - directly translated into Kong configuration. - -For example, an Ingress is translated directly into a Kong route, and a -KongConsumer is translated directly into a -[Kong consumer](/gateway/api/admin-ee/latest/#/Consumers/list-consumer/). A Secret containing -an authentication plugin credential is _not_ translated directly: it is only -translated into Kong configuration if a KongConsumer resource references it. - -Because they create Kong configuration independent of any other resources, -directly-translated resources require an ingress class, and their class must -match the class configured for the controller. Referenced resources do not -require a class, but must be referenced by a directly-translated resource -that matches the controller. - -### Adding class information to resources - -Most resources use a [kubernetes.io/ingress-class annotation][class-annotation] -to indicate their class. There are several exceptions: - -- v1 Ingress resources have a dedicated `class` field. -- Knative Services [use the class specified][knative-class] by the - `ingress.class` key of the Knative installation's `config-network` ConfigMap. - You can optionally [override this on a per-Service basis][knative-override] - by adding a `networking.knative.dev/ingress.class` annotation to the Service. - -### Enabling support for classless resources - -Specifying a class is optional for some resources. Although specifying a class -is recommended, you can instruct the controller to process resources without a -class annotation using flags: - -- `--process-classless-ingress-v1beta1` instructs the controller to translate - v1beta1 Ingress resources with no class annotation. -- `--process-classless-kong-consumer` instructs the controller to translate - KongConsumer resources with no class annotation. - -These flags are primarily intended for compatibility with older configuration -({{site.kic_product_name}} before 0.10 had less strict class -requirements, and it was common to omit class annotations). 
If you are creating -new configuration and do not have older configuration without class -annotations, recommended best practice is to add class information to Ingress -and KongConsumer resources and not set the above flags. Doing so avoids -accidentally creating duplicate configuration in other ingress controller -instances. - -These flags do not _ignore_ `ingress.class` annotations: they allow resources -with no such annotation, but will not allow resource that have a non-matching -`ingress.class` annotation. - -## When to use a custom class - -Using the default `kong` class is fine for simpler deployments, where only one -{{site.kic_product_name}} instance is running in a cluster. Changing the class is -typical when: - -- You install multiple Kong environments in one Kubernetes cluster to handle - different types of ingress traffic, e.g. when using separate Kong instances - to handle traffic on internal and external load balancers, or deploying - different types of non-production environments in a single test cluster. -- You install multiple controller instances alongside a single Kong cluster to - separate configuration into different Kong workspaces (using the - `--kong-workspace` flag) or to restrict which Kubernetes namespaces any one - controller instance has access to. - -## Legacy behavior - -This overview covers behavior in {{site.kic_product_name}} version 0.10.0 onward. -Earlier versions had a special case for the default class and a bug affecting -custom classes: - -- When using the default `kong` class, the controller would always process - classless resources in addition to `kong`-class resources. When using a - non-default controller class, the controller would only process resources - with that class, not classless resources. Although this was by design, it was - a source of user confusion. -- When using a custom controller class, some resources that should not have - required a class (because they were referenced by other resources) - effectively did require a class: while these resources were loaded initially, - the controller would not track updates to them unless they had a class - annotation. - -In versions 0.10.0+ you must instruct the controller to load classless -resources, which is allowed (but not recommended) for either the default or -custom classes. Resources referenced by another resource are always loaded and -updated correctly regardless of which class you set on the controller; you do -not need to add class annotations to these resources when using a custom class. - -## Examples - -Typical configurations will include a mix of resources that have class -information and resources that are referenced by them. 
For example, consider -the following configuration for authenticating a request, using a KongConsumer, -credential Secret, Ingress, and KongPlugin (a Service is implied, but not -shown): - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: dyadya-styopa - annotations: - kubernetes.io/ingress.class: "kong" -username: styopa -credentials: -- styopa-key - ---- - -kind: Secret -apiVersion: v1 -stringData: - key: bylkogdatomoryakom - kongCredType: key-auth -metadata: - name: styopa-key - ---- - -kind: Ingress -apiVersion: extensions/v1beta1 -metadata: - name: ktonezhnaet - annotations: - kubernetes.io/ingress.class: "kong" - konghq.com/plugins: "key-auth-example" -spec: - rules: - - http: - paths: - - path: /vsemznakom - backend: - serviceName: httpbin - servicePort: 80 - ---- - -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: key-auth-example -plugin: key-auth -``` - -The KongConsumer and Ingress resources both have class annotations, as they are -resources that the controller uses as a basis for building Kong configuration. -The Secret and KongPlugin _do not_ have class annotations, as they are -referenced by other resources that do. - -[class-annotation]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#kubernetesioingressclass -[knative-class]: /kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/#ingress-class -[knative-override]: https://knative.dev/docs/serving/services/ingress-class/ diff --git a/app/kubernetes-ingress-controller/1.1.x/concepts/ingress-versions.md b/app/kubernetes-ingress-controller/1.1.x/concepts/ingress-versions.md deleted file mode 100644 index 456959b77e5f..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/concepts/ingress-versions.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: Ingress v1 and v1beta1 Differences ---- - -## Introduction - -Kubernetes 1.19 introduced a new `networking.k8s.io/v1` API for the [Ingress resource][kubernetes-ingress-doc]. -It standardizes common practices and clarifies implementation requirements that -were previously up to individual controller vendors. This document covers those -changes as they relate to {{site.kic_product_name}} and provides sample -equivalent `networking.k8s.io/v1beta1` and `networking.k8s.io/v1` resources for comparison. - -## Paths - -Both Ingress v1beta1 and v1 HTTP rules require a path, which represents a [URI -path][uri-rfc-paths]. Although v1beta1 had specified that paths were [POSIX -regular expressions][posix-regex] and enforced this, in practice most -controllers used other implementations that did not match the -specification. v1 seeks to reduce confusion by introducing several [path -types][path-types] and lifting restrictions on regular expression grammars used -by controllers. - -### networking.k8s.io/v1beta1 - -The controller passes paths directly to Kong and relies on its [path handling -logic][kong-paths]. The Kong proxy treats paths as a prefix unless they include -characters [not allowed in RFC 3986 paths][uri-rfc-paths], in which case the -proxy assumes they are a regular expression, and does not treat slashes as -special characters. For example, the prefix `/foo` can match any of the -following: - -``` -/foo -/foo/ -/foobar -/foo/bar -``` - -### networking.k8s.io/v1 - -Although v1 Ingresses provide path types with more clearly-defined logic, the -controller must still create Kong routes and work within the Kong proxy's -routing logic. 
As such, the controller translates Ingress rule paths to create -Kong routes that match one of the following specifications: `Exact`, `Prefix`, or `ImplementationSpecific`. - -#### Exact - -If `pathType` is `Exact`, the controller creates a Kong route with a regular -expression that matches the rule path only. For example, an exact rule for `/foo` in an -Ingress translates to a Kong route with a `/foo$` regular expression path. - -#### Prefix - -If `pathType` is `Prefix`, the controller creates a Kong route with two path -criteria. For example, `/foo` will create a route with a `/foo$` regular expression and -`/foo/` plain path. - -#### ImplementationSpecific - -The controller leaves `ImplementationSpecific` path rules entirely up to the Kong -router. It creates a route with the exact same path string as the Ingress rule. - -{:.important} -> Both `Prefix` and `Exact` paths modify the paths you -provide, and those modifications may interfere with user-provided regular -expressions. If you are using your own regular expressions in paths, use -`ImplementationSpecific` to avoid unexpected behavior. -## Ingress class - -[Ingress class][ingress-class] indicates which resources an ingress controller -should process. It provides a means to separate out configuration intended for -other controllers or other instances of the {{site.kic_product_name}}. - -In v1beta1, ingress class was handled informally using -`kubernetes.io/ingress.class` [annotations][deprecated-annotation]. v1 -introduces a new [IngressClass resource][ingress-class-api] which provides -richer information about the controller. v1 Ingresses are bound to a class via -their `ingressClassName` field. - -For example, consider this v1beta1 Ingress: - -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: example-ingress - annotations: - kubernetes.io/ingress.class: "kong" -spec: - rules: - - host: example.com - http: - paths: - - path: /test - backend: - serviceName: echo - servicePort: 80 -``` - -Its ingress class annotation is set to `kong`, and ingress controllers set to -process `kong` class Ingresses will process it. - -In v1, the equivalent configuration declares a `kong` IngressClass resource -whose `metadata.name` field indicates the class name. The `ingressClassName` -value of the Ingress object must match the value of the `name` field in the -IngressClass metadata: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - name: kong -spec: - controller: ingress-controllers.konghq.com/kong ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: example-ingress -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /testpath - pathType: Prefix - backend: - service: - name: test - port: - number: 80 -``` - -## Hostnames - -Ingress v1 formally codifies support for [wildcard hostnames][wildcard-hostnames]. -v1beta1 Ingresses did not reject wildcard hostnames, however, and Kong had -[existing support for them][kong-wildcard-hostnames]. - -As such, while the v1beta1 specification did not officially support wildcard -hostnames, you can use wildcard hostnames with either version. Setting a -hostname like `*.example.com` will match requests for both `foo.example.com` -and `bar.example.com` with either v1 or v1beta1 Ingresses. - -## Backend types - -Ingress v1 introduces support for backends other than Kubernetes Services through -[resource backends][resource-backends]. 
- -Kong does not support any dedicated resource backend configurations, though it -does have support for Routes without Services in some cases (for example, when -using the [AWS Lambda plugin][lambda-plugin]). For these routes, you should -create a placeholder Kubernetes Service for them, using an [ExternalName -Service][external-name] with an RFC 2606 invalid hostname, e.g. -`kong.invalid`. You can use these placeholder services with either v1 or -v1beta1 Ingresses. - -[kubernetes-ingress-doc]: https://kubernetes.io/docs/concepts/services-networking/ingress/ -[ingress-class]: /kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes -[uri-rfc-paths]: https://tools.ietf.org/html/rfc3986#section-3.3 -[posix-regex]: https://www.boost.org/doc/libs/1_38_0/libs/regex/doc/html/boost_regex/syntax/basic_extended.html -[path-types]: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types -[kong-paths]: /gateway/latest/reference/proxy/#request-path -[wildcard-hostnames]: https://kubernetes.io/docs/concepts/services-networking/ingress/#hostname-wildcards -[kong-wildcard-hostnames]: /gateway/latest/reference/proxy/#using-wildcard-hostnames -[resource-backends]: https://kubernetes.io/docs/concepts/services-networking/ingress/#resource-backend -[lambda-plugin]: /hub/kong-inc/aws-lambda/ -[external-name]: https://kubernetes.io/docs/concepts/services-networking/service/#externalname -[deprecated-annotation]: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation -[ingress-class-api]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-class-v1/ diff --git a/app/kubernetes-ingress-controller/1.1.x/concepts/k4k8s-with-kong-enterprise.md b/app/kubernetes-ingress-controller/1.1.x/concepts/k4k8s-with-kong-enterprise.md deleted file mode 100644 index 4331901f398f..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/concepts/k4k8s-with-kong-enterprise.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -Kong for Kubernetes is a {{site.kic_product_name}} built on top -of Open-Source {{site.base_gateway}}. - -If you are an Enterprise customer, you have an option of running the -[Enterprise version](/gateway/latest/install-and-run/kubernetes/) -of the Ingress Controller, which includes -all the Enterprise plugins but does not include Kong Manager or any -other Enterprise features. This makes it possible to -run the Ingress layer without a database, providing a very low -operational and maintenance footprint. - -However, in some cases, those enterprise features are necessary, -and for such use-cases we support another deployment - Kong for -Kubernetes with {{site.ee_product_name}}. - -As seen in the diagram below, this deployment consists of -Kong for Kubernetes deployed in Kubernetes, and is hooked up with -a database. If there are services running outside Kubernetes, -a regular {{site.base_gateway}} proxy can be deployed there and connected to the -same database. This provides a single pane of visibility of -all services that are running in your infrastructure. - -![architecture-overview](/assets/images/products/kubernetes-ingress-controller/k4k8s-with-kong-enterprise.png "K4K8S with {{site.ee_product_name}}") - -In this deployment model, the database for Kong can be hosted anywhere. -It can be a managed DBaaS service like Amazon RDS, Google Cloud -SQL or a Postgres instance managed in-house or even an instance -deployed on Kubernetes. 
-If you are following this model, please keep in mind the following: -- It is recommended to not deploy Postgres on Kubernetes, - due to the fact that running stateful applications on Kubernetes - is challenging to get right. -- Ensure that you have the same image/package of {{site.ee_product_name}} - running across the fleet. This means that all Kong instances that are - connected to the same database must use the - same version of {{site.ee_product_name}} package. - -[This guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise) -walks through the setup of the above architecture. diff --git a/app/kubernetes-ingress-controller/1.1.x/concepts/security.md b/app/kubernetes-ingress-controller/1.1.x/concepts/security.md deleted file mode 100644 index bbb08cd9073b..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/concepts/security.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Security ---- - -This document explains the security aspects of the {{site.kic_product_name}}. - -The {{site.kic_product_name}} communicates with Kubernetes API-server and Kong's -Admin API. APIs on both sides offer authentication/authorization features -and the controller integrates with them gracefully. - -## Kubernetes RBAC - -The {{site.kic_product_name}} is deployed with RBAC permissions as explained in the -[deployment](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment) document. -It has read and list permissions on most resources but requires update -and create permission for a few resources to provide seamless integration. -The permissions can be locked down further if needed depending on the specific -use-case. -This RBAC policy is associated with a ServiceAccount and the ServiceAccount -is associated with the {{site.kic_product_name}}. -The Controller uses the ServiceAccount credential to authenticate and -authorize itself against the Kubernetes API-server. - -## Kong Admin API Protection - -Kong's Admin API is used to control configuration of Kong and proxying behavior. -If an attacker happens to gain access to Kong's Admin API, they -will be able to perform all actions as an authorized user like -modifying or deleting Kong's configuration. -Hence, it is important that the deployment -ensures that the likelihood of this happening is as small as possible. - -In the example deployments, the Controller and Kong's Admin API communicate -over the loopback (`lo`) interface of the pod. -Kong is not performing any kind of authorization or -authentication on the Admin API, hence the API is accessible only -on the loopback interface to limit the attack surface. -Although not ideal, this setup requires fewer steps -to get started and can be further hardened as required. - -Please note that it is very important that Kong's Admin API is not accessible -inside the cluster as any malicious service can change Kong's configuration. -If you're exposing Kong's Admin API itself outside the cluster, please ensure -that you have the necessary authentication in place first. - -### Authentication on Kong's Admin API - -If Kong's Admin API is protected with one of the authentication plugins, -the Controller can authenticate itself against it to add another layer of -security. -The Controller comes with support for injecting arbitrary HTTP headers -in the requests it makes to Kong's Admin API, which can be used to inject -authentication credentials. -The headers can be specified using the CLI flag `--kong-admin-header` in the Ingress -Controller. 
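-
-For example, assuming the flag takes a `name:value` pair and that its
-environment form follows the `CONTROLLER_*` pattern used for other flags, a
-header could be injected into the stock deployment with a sketch like the
-following (the token value is a placeholder):
-
-```bash
-$ kubectl set env deployment/ingress-kong -n kong \
-    -c ingress-controller \
-    CONTROLLER_KONG_ADMIN_HEADER="Kong-Admin-Token:my-rbac-token"
-```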
-
-The Ingress Controller will support mutual-TLS-based authentication on Kong's Admin
-API in the future.
-
-### {{site.ee_product_name}} RBAC
-
-{{site.ee_product_name}} comes with support for authentication and authorization on
-Kong's Admin API.
-
-Once an RBAC token is provisioned, the {{site.kic_product_name}} can use the RBAC
-token to authenticate against {{site.ee_product_name}}. Use the `--kong-admin-header` CLI
-flag to pass the RBAC token to the Ingress Controller.
diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/admission-webhook.md b/app/kubernetes-ingress-controller/1.1.x/deployment/admission-webhook.md
deleted file mode 100644
index 23ee8d5da9fb..000000000000
--- a/app/kubernetes-ingress-controller/1.1.x/deployment/admission-webhook.md
+++ /dev/null
@@ -1,211 +0,0 @@
----
-title: Validating Admission Controller
----
-
-The {{site.kic_product_name}} ships with an Admission Controller for KongPlugin
-and KongConsumer resources in the `configuration.konghq.com` API group.
-
-The Admission Controller needs a TLS certificate and key pair, which
-you need to generate as part of the deployment.
-
-The following guide walks through how to create the required key pair
-and enable the admission controller.
-
-Please note that this requires {{site.kic_product_name}} >= 0.6 to be
-already installed in the cluster.
-
-## tldr;
-
-If you are using the stock YAML manifests to install and set up Kong for
-Kubernetes, then you can set up the admission webhook using a single command:
-
-```bash
-curl -sL https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/hack/deploy-admission-controller.sh | bash -
-```
-
-This script takes all the following commands and packs them together.
-You need `kubectl` and `openssl` installed on your workstation for this to
-work.
-
-## Create a certificate for the admission controller
-
-The Kubernetes API-server makes an HTTPS call to the Admission Controller to verify
-whether the custom resource is valid. For this to work, the Kubernetes API-server
-needs to trust the CA certificate that is used to sign the Admission Controller's
-TLS certificate.
-
-This can be accomplished either using a self-signed certificate or using the
-Kubernetes CA. Follow one of the steps below and then go to the
-[Create the secret](#create-the-secret) step below.
-
-Please note that the `CN` field of the x509 certificate takes the form
-`<name-of-the-service>.<namespace>.svc`, which
-in the default case is `kong-validation-webhook.kong.svc`.
-
-### Using self-signed certificate
-
-Use openssl to generate a self-signed certificate:
-
-```bash
-$ openssl req -x509 -newkey rsa:2048 -keyout tls.key -out tls.crt -days 365 \
-    -nodes -subj "/CN=kong-validation-webhook.kong.svc" \
-    -extensions EXT -config <( \
-   printf "[dn]\nCN=kong-validation-webhook.kong.svc\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:kong-validation-webhook.kong.svc\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth")
-Generating a 2048 bit RSA private key
-..........................................................+++
-.............+++
-writing new private key to 'tls.key'
-```
-
-### Using in-built Kubernetes CA
-
-Kubernetes comes with an in-built CA which can be used to provision
-a certificate for the Admission Controller.
-Please refer to
-[this guide](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/)
-on how to generate a certificate using the in-built CA.
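-
-Whichever method you use, it can be worth sanity-checking the resulting
-certificate before creating the secret. This sketch assumes OpenSSL 1.1.1 or
-newer for the `-ext` option:
-
-```bash
-# Confirm the CN and SAN match <name-of-the-service>.<namespace>.svc,
-# e.g. kong-validation-webhook.kong.svc in the default case
-$ openssl x509 -in tls.crt -noout -subject -ext subjectAltName
-```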
- -### Create the secret - -Next, create a Kubernetes secret object based on the key and certificate that -was generatd in the previous steps. -Here, we assume that the PEM-encoded certificate is stored in a file named -`tls.crt` and private key is stored in `tls.key`. - -```bash -$ kubectl create secret tls kong-validation-webhook -n kong \ - --key tls.key --cert tls.crt -secret/kong-validation-webhook created -``` - -## Update the deployment - -Once the secret is created, update the Ingress Controller deployment: - -Execute the following command to patch the {{site.kic_product_name}} deployment -to mount the certificate and key pair and also enable the admission controller: - -```bash -$ kubectl patch deploy -n kong ingress-kong \ - -p '{"spec":{"template":{"spec":{"containers":[{"name":"ingress-controller","env":[{"name":"CONTROLLER_ADMISSION_WEBHOOK_LISTEN","value":":8080"}],"volumeMounts":[{"name":"validation-webhook","mountPath":"/admission-webhook"}]}],"volumes":[{"secret":{"secretName":"kong-validation-webhook"},"name":"validation-webhook"}]}}}}' -deployment.extensions/ingress-kong patched -``` - -## Enable the validating admission - -If you are using Kubernetes CA to generate the certificate, you don't need -to supply a CA certificate (in the `caBunde` param) -as part of the Validation Webhook configuration -as the API-server already trusts the internal CA. - -```bash -$ echo "apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: kong-validations -webhooks: -- name: validations.kong.konghq.com - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: ["v1beta1"] - rules: - - apiGroups: - - configuration.konghq.com - apiVersions: - - '*' - operations: - - CREATE - - UPDATE - resources: - - kongconsumers - - kongplugins - - apiGroups: - - '' - apiVersions: - - 'v1' - operations: - - CREATE - - UPDATE - resources: - - secrets - clientConfig: - service: - namespace: kong - name: kong-validation-webhook - caBundle: $(cat tls.crt | base64 -w 0) " | kubectl apply -f - -``` - -## Verify if it works - -### Verify duplicate KongConsumers - -Create a KongConsumer with username as `harry`: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, create another KongConsumer with the same username: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry2 - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: consumer already exists -``` - -The validation webhook rejected the KongConsumer resource as there already -exists a consumer in Kong with the same username. - -### Verify incorrect KongPlugins - -Try to create the folowing KongPlugin resource. -The `foo` config property does not exist in the configuration definition and -hence the Admission Controller returns back an error. -If you remove the `foo: bar` configuration line, the plugin will be -created succesfully. 
- -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - foo: bar - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: 400 Bad Request {"fields":{"config":{"foo":"unknown field"}},"name":"schema violation","code":2,"message":"schema violation (config.foo: unknown field)"} -``` - -### Verify incorrect credential secrets - -With 0.7 and above versions of the controller, validations also take place -for incorrect secret types and wrong parameters to the secrets: - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=basic-auth \ - --from-literal=username=foo -Error from server: admission webhook "validations.kong.konghq.com" denied the request: missing required field(s): password -``` - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=wrong-auth \ - --from-literal=sdfkey=my-sooper-secret-key -Error from server: admission webhook "validations.kong.konghq.com" denied the request: invalid credential type: wrong-auth -``` diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/aks.md b/app/kubernetes-ingress-controller/1.1.x/deployment/aks.md deleted file mode 100644 index df60e4014709..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/deployment/aks.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kong Ingress on Azure Kubernetes Service (AKS) ---- - -## Requirements - -1. A fully functional AKS cluster. - Please follow Azure's Guide to - [set up an AKS cluster](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the AKS Kubernetes - cluster you'll work on. The above AKS setup guide will help - you set this up. - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It will take a few minutes for all containers to start and report -healthy status. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Set up environment variables - -Next, set up an environment variable with the IP address at which -Kong is accessible. 
This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's set up an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Microsoft Azure to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/eks.md b/app/kubernetes-ingress-controller/1.1.x/deployment/eks.md deleted file mode 100644 index 209a750f4edf..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/deployment/eks.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Kong Ingress on Elastic Kubernetes Service (EKS) ---- - -## Requirements - -1. A fully functional EKS cluster. - Please follow Amazon's Guide to - [set up an EKS cluster](https://aws.amazon.com/getting-started/projects/deploy-kubernetes-app-amazon-eks/). -2. Basic understanding of Kubernetes -3. A working `kubectl` linked to the EKS Kubernetes - cluster we will work on. The above EKS setup guide will help - you set this up. - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It may take a few minutes for all containers to start and report -healthy statuses. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, create an environment variable with the IP address at which -Kong is accessible. This IP address sends requests to the -Kubernetes cluster. 
- -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 example.eu-west-1.elb.amazonaws.com 80:31929/TCP,443:31408/TCP 57d -``` - -Create an environment variable to hold the ELB hostname: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].hostname}" service -n kong kong-proxy) -``` - -> Note: It may take some time for Amazon to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## TLS configuration - -Versions of Kong prior to 2.0.0 default to using [the "modern" cipher suite -list](https://wiki.mozilla.org/Security/Server_Side_TLS). This is not -compatible with ELBs when the ELB terminates TLS at the edge and establishes a -new session with Kong. This error will appear in Kong's logs: - -``` -*7961 SSL_do_handshake() failed (SSL: error:1417A0C1:SSL routines:tls_post_process_client_hello:no shared cipher) while SSL handshaking -``` - -To correct this issue, set `KONG_SSL_CIPHER_SUITE=intermediate` in your -environment variables. diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/gke.md b/app/kubernetes-ingress-controller/1.1.x/deployment/gke.md deleted file mode 100644 index 52de2a69d458..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/deployment/gke.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Kong Ingress on Google Kubernetes Engine (GKE) ---- - -## Requirements - -1. A fully functional GKE cluster. - The easiest way to do this is to do it via the web UI: - Go to Google Cloud's console > Kubernetes Engine > Cluster > - Create a new cluster. - This documentation has been tested on a zonal cluster in - europe-west-4a using 1.10.5-gke.4 as Master version. - The default pool has been assigned 2 nodes of kind 1VCPU - with 3.75GB memory (default setting). - The OS used is COS (Container Optimized OS) and the auto-scaling - has been enabled. Default settings are being used except for - `HTTP load balancing` which has been disabled (you probably want to use - Kong features for this). For more information on GKE clusters, - refer to - [the GKE documentation](https://cloud.google.com/kubernetes-engine/docs/). -1. If you wish to use a static IP for Kong, you have to reserve a static IP - address (in Google Cloud's console > VPC network > - External IP addresses). For information, - you must create a regional IP - global is not supported as `loadBalancerIP` yet) -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the GKE Kubernetes - cluster we will work on. For information, you can associate a new `kubectl` - context by using: - - ```bash - gcloud container clusters get-credentials --zone --project - ``` - -## Update User Permissions - -> Because of [the way Kubernetes Engine checks permissions -when you create a Role or ClusterRole](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control), you must -first create a RoleBinding that grants you all of -the permissions included in the role you want to create. -An example workaround is to create a RoleBinding that -gives your Google identity a cluster-admin role -before attempting to create additional Role or -ClusterRole permissions. 
-This is a known issue in RBAC in Kubernetes and -Kubernetes Engine versions 1.6 and -later. - -A fast workaround: - -```yaml - -echo -n " -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: cluster-admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: User - name: # usually the Google account - # e.g.: example@testorg.com - namespace: kube-system" | kubectl apply -f - - -``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. -Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Google to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/k4k8s-enterprise.md b/app/kubernetes-ingress-controller/1.1.x/deployment/k4k8s-enterprise.md deleted file mode 100644 index 0e5dfae63cb9..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/deployment/k4k8s-enterprise.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Kong for Kubernetes Enterprise ---- - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. 
- -## Prerequisites - -Before we can deploy Kong, we need to satisfy one prerequisite: create a license -secret. - -To create this secret, provision the `kong` namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -Enterprise version requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -If you do not have one, please contact your sales representative. -Save the license file temporarily to disk with filename `license` -and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -## Installers - -Once the secret is in-place, we can proceed with installation. - -Kong for Kubernetes can be installed using an installer of -your choice: - -### YAML manifests - -Execute the following to install Kong for Kubernetes Enterprise using YAML -manifests: - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless-k4k8s-enterprise.yaml -``` - -It takes a few minutes the first time this setup is done. - -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-6ffcf8c447-5qv6z 2/2 Running 1 44m -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.254.78 35.233.198.16 80:32697/TCP,443:32365/TCP 22h -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP address assigned to the service. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for your cloud provider to actually associate the -IP address to the `kong-proxy` Service. - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes Enterprise: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/deploy/manifests/enterprise-k8s -``` - -You can use the above URL as a base kustomization and build on top of it -as well. 
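If you do build on top of it, a minimal sketch of such an overlay is shown below. The overlay layout and the `patch.yaml` file name are only examples and are not part of the upstream manifests; adjust them to your own setup:

```bash
# A minimal overlay that builds on the enterprise-k8s base above.
# "patch.yaml" is a placeholder for your own strategic-merge patch.
cat <<'EOF' > kustomization.yaml
bases:
- github.com/kong/kubernetes-ingress-controller/deploy/manifests/enterprise-k8s
patchesStrategicMerge:
- patch.yaml
EOF

kustomize build . | kubectl apply -f -
```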
- -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name - --namespace kong \ - -f values.yaml \ - --set ingressController.installCRDs=false -``` - -### Example values.yaml -``` -image: - repository: kong/kong-gateway - tag: 2.2.1.0-alpine -env: - LICENSE_DATA: - valueFrom: - secretKeyRef: - name: kong-enterprise-license - key: license -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes Enterprise - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/k4k8s.md b/app/kubernetes-ingress-controller/1.1.x/deployment/k4k8s.md deleted file mode 100644 index 288703cd434b..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/deployment/k4k8s.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kong for Kubernetes ---- - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -## Installers - -Kong for Kubernetes can be installed using an installer of -your choice. - -Once you've installed Kong for Kubernetes, -jump to the [next section](#using-kong-for-kubernetes) -on using it. - -### YAML manifests - -Please pick one of the following guides depending on your platform: - -- [Minikube](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/) -- [Google Kubernetes Engine(GKE) by Google](/kubernetes-ingress-controller/{{page.release}}/deployment/gke/) -- [Elastic Kubernetes Service(EKS) by Amazon](/kubernetes-ingress-controller/{{page.release}}/deployment/eks/) -- [Azure Kubernetes Service(AKS) by Microsoft](/kubernetes-ingress-controller/{{page.release}}/deployment/aks/) - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/deploy/manifests/base -``` - -You can use the above URL as a base kustomization and build on top of it -to make it suite better for your cluster and use-case. 
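After applying the build output, it can help to confirm that the controller came up before setting any environment variables. A quick check, assuming the default `kong` namespace used by these manifests:

```bash
# The controller pod should show 2/2 containers (proxy and controller) in Running state.
$ kubectl get pods -n kong
$ kubectl get service kong-proxy -n kong
```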
- -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes - -Once you've installed Kong for Kubernetes, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/kong-enterprise.md b/app/kubernetes-ingress-controller/1.1.x/deployment/kong-enterprise.md deleted file mode 100644 index e2f7648a283c..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/deployment/kong-enterprise.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -This guide walks through setting up the {{site.kic_product_name}} using Kong -Enterprise. This architecture is described in detail in [this doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise/). - -We assume that we start from scratch and you don't have {{site.ee_product_name}} -deployed. For the sake of simplicity, we will deploy {{site.ee_product_name}} and -its database in Kubernetes itself. You can safely run them outside -Kubernetes as well. - -## Prerequisites - -Before we can deploy the {{site.kic_product_name}} with {{site.ee_product_name}}, -we need to satisfy the following prerequisites: -- [{{site.ee_product_name}} License secret](#kong-enterprise-license-secret) -- [{{site.ee_product_name}} bootstrap password](#kong-enterprise-bootstrap-password) - -In order to create these secrets, let's provision the `kong` -namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -{{site.ee_product_name}} requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -Save the license file temporarily to disk and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -### {{site.ee_product_name}} bootstrap password - -Next, we need to create a secret containing the password using which we can login into Kong Manager. -Please replace `cloudnative` with a random password of your choice and note it down. - -```bash -$ kubectl create secret generic kong-enterprise-superuser-password -n kong --from-literal=password=cloudnative -secret/kong-enterprise-superuser-password created -``` - -Once these are created, we are ready to deploy {{site.ee_product_name}} -Ingress Controller. 
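If you would rather generate the bootstrap password than pick one by hand, a small sketch is shown below; it assumes `openssl` is available locally, and any other random-string generator works just as well. Remember to note the value down, you will need it to log in to Kong Manager.

```bash
# Generate a random password and store it in the same secret as above.
# Keep a copy of $KONG_PASSWORD; it is the kong_admin login for Kong Manager.
$ KONG_PASSWORD="$(openssl rand -base64 24)"
$ echo "$KONG_PASSWORD"
$ kubectl create secret generic kong-enterprise-superuser-password \
    -n kong --from-literal=password="$KONG_PASSWORD"
```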
- -## Install - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/single/all-in-one-postgres-enterprise.yaml -``` - -It takes a little while to bootstrap the database. -Once bootstrapped, you should see the {{site.kic_product_name}} running with -{{site.ee_product_name}} as its core: - -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-548b9cff98-n44zj 2/2 Running 0 21s -kong-migrations-pzrzz 0/1 Completed 0 4m3s -postgres-0 1/1 Running 0 4m3s -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-admin LoadBalancer 10.63.255.85 34.83.95.105 80:30574/TCP 4m35s -kong-manager LoadBalancer 10.63.247.16 34.83.242.237 80:31045/TCP 4m34s -kong-proxy LoadBalancer 10.63.242.31 35.230.122.13 80:32006/TCP,443:32007/TCP 4m34s -kong-validation-webhook ClusterIP 10.63.240.154 443/TCP 4m34s -postgres ClusterIP 10.63.241.104 5432/TCP 4m34s - -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP assigned to the three LoadBalancer type services. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. If you are running Minikube, you will not get an -external IP address. - -### Setup Kong Manager - -Next, if you browse to the IP address or host of the `kong-manager` service in your Browser, -which in our case is `http://34.83.242.237`. -Kong Manager should load in your browser. -Try logging in to the Manager with the username `kong_admin` -and the password you supplied in the prerequisite, it should fail. -The reason being we've not yet told Kong Manager where it can find the Admin API. - -Let's set that up. We will take the External IP address of `kong-admin` service and -set the environment variable `KONG_ADMIN_API_URI`: - -```bash -KONG_ADMIN_IP=$(kubectl get svc -n kong kong-admin --output=jsonpath='{.status.loadBalancer.ingress[0].ip}') -kubectl patch deployment -n kong ingress-kong -p "{\"spec\": { \"template\" : { \"spec\" : {\"containers\":[{\"name\":\"proxy\",\"env\": [{ \"name\" : \"KONG_ADMIN_API_URI\", \"value\": \"${KONG_ADMIN_IP}\" }]}]}}}}" -``` - -It will take a few minutes to roll out the updated deployment and once the new -`ingress-kong` pod is up and running, you should be able to log into the Kong Manager UI. - -As you follow along with other guides on how to use your newly deployed the {{site.kic_product_name}}, -you will be able to browse Kong Manager and see changes reflected in the UI as Kong's -configuration changes. - -## Using Kong for Kubernetes with {{site.ee_product_name}} - -Let's setup an environment variable to hold the IP address of `kong-proxy` service: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. - -## Customizing by use-case - -The deployment in this guide is a point to start using Ingress Controller. -Based on your existing architecture, this deployment will require custom -work to make sure that it needs all of your requirements. - -In this guide, there are three load-balancers deployed for each of -Kong Proxy, Kong Admin and Kong Manager services. 
It is possible and -recommended to instead have a single Load balancer and then use DNS names -and Ingress resources to expose the Admin and Manager services outside -the cluster. diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/minikube.md b/app/kubernetes-ingress-controller/1.1.x/deployment/minikube.md deleted file mode 100644 index d56a366bb0bd..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/deployment/minikube.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Kong Ingress on Minikube ---- - -## Setup Minikube - -1. Install [`minikube`](https://github.com/kubernetes/minikube) - - Minikube is a tool that makes it easy to run Kubernetes locally. - Minikube runs a single-node Kubernetes cluster inside a VM on your laptop - for users looking to try out Kubernetes or develop with it day-to-day. - -1. Start `minikube` - - ```bash - minikube start - ``` - - It will take a few minutes to get all resources provisioned. - - ```bash - kubectl get nodes - ``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. -Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -> Note: this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -```bash -$ export PROXY_IP=$(minikube service -n kong kong-proxy --url | head -1) -# If installed by helm, service name would be "-kong-proxy". -# $ export PROXY_IP=$(minikube service -kong-proxy --url | head -1) -$ echo $PROXY_IP -http://192.168.99.100:32728 -``` - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. 
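With `PROXY_IP` set, you can confirm that the proxy is reachable before creating any Ingress rules. Since no routes are configured yet, Kong answers with a 404:

```bash
$ curl -i $PROXY_IP
HTTP/1.1 404 Not Found
...
{"message":"no Route matched with those values"}
```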
diff --git a/app/kubernetes-ingress-controller/1.1.x/deployment/overview.md b/app/kubernetes-ingress-controller/1.1.x/deployment/overview.md deleted file mode 100644 index 5a37579b5a99..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/deployment/overview.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Installing and Configuring ---- - -## Getting started - -If you are getting started with Kong for Kubernetes, -install it on Minikube using our Minikube [setup guide](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/). - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## Overview - -The {{site.kic_product_name}} can be installed on a local, managed -or any Kubernetes cluster which supports a service of type `LoadBalancer`. - -As explained in the [deployment document](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/), there -are a variety of configurations and runtimes for the {{site.kic_product_name}}. - -The following sections detail on deployment steps for all the different -runtimes: - -## Kong for Kubernetes - - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s/) to deploy Kong for Kubernetes -using an installation method of your choice. - -## Kong for Kubernetes Enterprise - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) to deploy Kong for Kubernetes -Enterprise if you have purchased or are trying out {{site.ee_product_name}}. - -## Kong for Kubernetes with {{site.ee_product_name}} - -Kong for Kubernetes can integrate with {{site.ee_product_name}} to -provide a single pane of visibility across all of your services -that are running in Kubernetes and non-Kubernetes environments. - -This [guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise/) goes into details of -the architecture and how one can set that up. - -## Admission Controller - -The {{site.kic_product_name}} also ships with a Validating -Admission Controller that -can be enabled to verify KongConsumer, KongPlugin and Secret -resources as they are created. -Please follow the [admission-webhook](/kubernetes-ingress-controller/{{page.release}}/deployment/admission-webhook/) deployment -guide to set it up. diff --git a/app/kubernetes-ingress-controller/1.1.x/faq.md b/app/kubernetes-ingress-controller/1.1.x/faq.md deleted file mode 100644 index 8f401a8b6e3c..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/faq.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: FAQs ---- - -### Why endpoints and not services? - -The {{site.kic_product_name}} does not use -[Services][k8s-service] to route traffic -to the pods. 
Instead, it uses the Endpoints API -to bypass [kube-proxy][kube-proxy] -to allow Kong features like session affinity and -custom load balancing algorithms. -It also removes overhead -such as conntrack entries for iptables DNAT. - -### Is it possible to create consumers using the Admin API? - -From version 0.5.0 onwards, the {{site.kic_product_name}} tags each entity -that it manages inside Kong's database and only manages the entities that -it creates. -This means that if consumers and credentials are created dynamically, they -won't be deleted by the Ingress Controller. - -[k8s-service]: https://kubernetes.io/docs/concepts/services-networking/service -[kube-proxy]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/cert-manager.md b/app/kubernetes-ingress-controller/1.1.x/guides/cert-manager.md deleted file mode 100644 index 471b4b29189c..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/cert-manager.md +++ /dev/null @@ -1,372 +0,0 @@ ---- -title: Using cert-manager for automated TLS certificate ---- - -This guide will walk through steps to set up the {{site.kic_product_name}} with -cert-manager to automate certificate management using Let's Encrypt. -Any ACME-based CA can be used in-place of Let's Encrypt as well. - -## Before you begin - -You will need the following: - -- Kubernetes cluster that can provision an IP address that is routable from - the Internet. If you don't have one, you can use GKE or any managed k8s - cloud offering. -- A domain name for which you control the DNS records. - This is necessary so that - Let's Encrypt can verify the ownership of the domain and issue a certificate. - In the current guide, we use `example.com`, please replace this with a domain - you control. - -This tutorial was written using Google Kubernetes Engine. - -## Set up the {{site.kic_product_name}} {#set-up-kic} - -Execute the following to install the Ingress Controller: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.example.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -## Set up cert-manager - -Please follow cert-manager's [documentation](https://cert-manager.io/docs/installation/) -on how to install cert-manager onto your cluster. 
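At the time this guide was written, a typical cert-manager install was a single manifest apply. The release URL and version below are illustrative only; check the cert-manager installation docs for the current one:

```bash
# Version shown is only an example; use the release recommended by the cert-manager docs.
$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml
```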
- -Once installed, verify all the components are running using: - -```bash -kubectl get all -n cert-manager -NAME READY STATUS RESTARTS AGE -pod/cert-manager-86478c5ff-mkhb9 1/1 Running 0 23m -pod/cert-manager-cainjector-65dbccb8b6-6dnjl 1/1 Running 0 23m -pod/cert-manager-webhook-78f9d55fdf-5wcnp 1/1 Running 0 23m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/cert-manager-webhook ClusterIP 10.63.240.251 443/TCP 23m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/cert-manager 1 1 1 1 23m -deployment.apps/cert-manager-cainjector 1 1 1 1 23m -deployment.apps/cert-manager-webhook 1 1 1 1 23m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cert-manager-86478c5ff 1 1 1 23m -replicaset.apps/cert-manager-cainjector-65dbccb8b6 1 1 1 23m -replicaset.apps/cert-manager-webhook-78f9d55fdf 1 1 1 23m -``` - -## Set up your application - -Any HTTP-based application can be used, for the purpose of the demo, install -the following echo server: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Set up DNS - -Get the IP address of the load balancer for Kong: - -```bash -$ kubectl get service -n kong kong-proxy -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 35.233.170.67 80:31929/TCP,443:31408/TCP 58d -``` - -To get only the IP address: - -```bash -$ kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy -35.233.170.67 -``` - -Please note that the IP address in your case will be different. - -Next, setup a DNS records to resolve `proxy.example.com` to the -above IP address: - -```bash -$ dig +short proxy.example.com -35.233.170.67 -``` - -Next, setup a CNAME DNS record to resolve `demo.example.com` to -`proxy.example.com`. - -```bash -$ dig +short demo.yolo2.com -proxy.example.com. -35.233.170.67 -``` - -## Expose your application to the Internet - -Setup an Ingress rule to expose the application: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: demo.example.com - http: - paths: - - path: / - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -Access your application: - -```bash -$ curl -I demo.example.com -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 21:14:45 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 -``` - -## Request TLS Certificate from Let's Encrypt - -First, setup a ClusterIssuer for cert-manager - -```bash -$ echo "apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-prod - namespace: cert-manager -spec: - acme: - email: user@example.com #please change this - privateKeySecretRef: - name: letsencrypt-prod - server: https://acme-v02.api.letsencrypt.org/directory - solvers: - - http01: - ingress: - class: kong" | kubectl apply -f - -clusterissuer.cert-manager.io/letsencrypt-prod configured -``` - -*Note*: If you run into issues configuring this, -be sure that the group (`cert-manager.io`) and -version (`v1`) match those in the output of -`kubectl describe crd clusterissuer`. -This directs cert-manager which CA authority to use to issue the certificate. 
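Before updating the Ingress, it is worth confirming that cert-manager accepted the issuer. A quick check is to describe it and look for a `Ready` condition with status `True`:

```bash
# The ACME account registration shows up as a Ready condition on the ClusterIssuer.
$ kubectl describe clusterissuer letsencrypt-prod
```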
- -Next, update your Ingress resource to provision a certificate and then use it: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/tls-acme: "true" - cert-manager.io/cluster-issuer: letsencrypt-prod - kubernetes.io/ingress.class: kong -spec: - tls: - - secretName: demo-example-com - hosts: - - demo.example.com - rules: - - host: demo.example.com - http: - paths: - - path: / - backend: - serviceName: echo - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo-example-com configured -``` - -Things to note here: - -- The annotation `kubernetes.io/tls-acme` is set to `true`, informing - cert-manager that it should provision a certificate for hosts in this - Ingress using ACME protocol. -- `certmanager.k8s.io/cluster-issuer` is set to `letsencrypt-prod`, directing - cert-manager to use Let's Encrypt's production server to provision a TLS - certificate. -- `tls` section of the Ingress directs the {{site.kic_product_name}} to use the - secret `demo-example-com` to encrypt the traffic for `demo.example.com`. - This secret will be created by cert-manager. - -Once you update the Ingress resource, cert-manager will start provisioning -the certificate and in sometime the certificate will be available for use. - -You can track the progress of certificate issuance: - -```bash -$ kubectl describe certificate demo-example-com -Name: demo-example-com -Namespace: default -Labels: -Annotations: -API Version: certmanager.k8s.io/v1 -Kind: Certificate -Metadata: - Creation Timestamp: 2019-06-21T20:41:54Z - Generation: 1 - Owner References: - API Version: extensions/v1beta1 - Block Owner Deletion: true - Controller: true - Kind: Ingress - Name: demo-example-com - UID: 261d15d3-9464-11e9-9965-42010a8a01ad - Resource Version: 19561898 - Self Link: /apis/certmanager.k8s.io/v1/namespaces/default/certificates/demo-example-com - UID: 014d3f1d-9465-11e9-9965-42010a8a01ad -Spec: - Acme: - Config: - Domains: - demo.example.com - Http 01: - Dns Names: - demo.example.com - Issuer Ref: - Kind: ClusterIssuer - Name: letsencrypt-prod - Secret Name: demo-example-com -Status: - Conditions: - Last Transition Time: 2019-06-21T20:42:20Z - Message: Certificate is up to date and has not expired - Reason: Ready - Status: True - Type: Ready - Not After: 2019-09-19T19:42:19Z -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Generated 53m cert-manager Generated new private key - Normal GenerateSelfSigned 53m cert-manager Generated temporary self signed certificate - Normal OrderCreated 53m cert-manager Created Order resource "demo-example-com-3811625818" - Normal OrderComplete 53m cert-manager Order "demo-example-com-3811625818" completed successfully - Normal CertIssued 53m cert-manager Certificate issued successfully -``` - -## Test HTTPS - -Once all is in place, you can use HTTPS: - -```bash -$ curl -v https://demo.example.com -* Rebuilt URL to: https://demo.example.com/ -* Trying 35.233.170.67... 
-* TCP_NODELAY set -* Connected to demo.example.com (35.233.170.67) port 443 (#0) -* ALPN, offering h2 -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/cert.pem - CApath: none -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES256-GCM-SHA384 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=demo.example.com -* start date: Jun 21 19:42:19 2019 GMT -* expire date: Sep 19 19:42:19 2019 GMT -* subjectAltName: host "demo.example.com" matched cert's "demo.example.com" -* issuer: C=US; O=Let's Encrypt; CN=Let's Encrypt Authority X3 -* SSL certificate verify ok. -> GET / HTTP/1.1 -> Host: demo.example.com -> User-Agent: curl/7.54.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Content-Type: text/plain; charset=UTF-8 -< Transfer-Encoding: chunked -< Connection: keep-alive -< Date: Fri, 21 Jun 2019 21:37:43 GMT -< Server: echoserver -< X-Kong-Upstream-Latency: 1 -< X-Kong-Proxy-Latency: 1 -< Via: kong/1.1.2 -< - - -Hostname: echo-d778ffcd8-52ddj - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-9w4t - pod name: echo-d778ffcd8-52ddj - pod namespace: default - pod IP:10.60.2.246 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.2.239 - method=GET - real path=/ - query= - request_version=1.1 - request_scheme=http - request_uri=http://demo.example.com:8080/ - -Request Headers: - accept=*/* - connection=keep-alive - host=demo.example.com - user-agent=curl/7.54.0 - x-forwarded-for=10.138.0.6 - x-forwarded-host=demo.example.com - x-forwarded-port=8443 - x-forwarded-proto=https - x-real-ip=10.138.0.6 - -Request Body: - -no body in request- -``` - -Et voilà ! You've secured your API with HTTPS -with the {{site.kic_product_name}} and cert-manager. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/configure-acl-plugin.md b/app/kubernetes-ingress-controller/1.1.x/guides/configure-acl-plugin.md deleted file mode 100644 index 2ef8aecd4249..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/configure-acl-plugin.md +++ /dev/null @@ -1,755 +0,0 @@ ---- -title: Configuring ACL Plugin ---- - -This guide walks through configuring the Kong ACL Plugin. The ACL Plugin -requires the use of at least one Authentication plugin. This example will use -the JWT Auth Plugin - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. 
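For reference, on providers that assign an IP address to the `kong-proxy` Service the variable can be set as below; on providers such as EKS that hand out a DNS name instead, use `.hostname` in place of `.ip`:

```bash
$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy)
$ echo $PROXY_IP
```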
- -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create two Ingress rules to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Test the Ingress rules: - -```bash -$ curl -i $PROXY_IP/get -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -``` - -## Add JWT authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. Let's enable JWT authentication - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: app-jwt -plugin: jwt -" | kubectl apply -f - -``` - -Now let's associate the plugin to the Ingress rules we created earlier. 
- -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Any requests matching the proxying rules for `demo-get` and `demo` post will -now require a valid JWT and the consumer for the JWT to be associate with the -right ACL. - -```bash -$ curl -i $PROXY_IP/get - -HTTP/1.1 401 Unauthorized -Date: Mon, 06 Apr 2020 07:27:44 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 50 -X-Kong-Response-Latency: 2 -Server: kong/2.0.2 - - -{"message":"Unauthorized"} - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post - -HTTP/1.1 401 Unauthorized -Date: Mon, 06 Apr 2020 07:27:44 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 50 -X-Kong-Response-Latency: 2 -Server: kong/2.0.2 - - -{"message":"Unauthorized"} -``` - -You should get a 401 response telling you that the request is not authorized. - -## Provision Consumers - -Let's provision 2 KongConsumer resources: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -" | kubectl apply -f - -``` - -## Secrets - -Next, let's provision some Secrets for the KongConsumers to reference. Each -ACL will need its own Secret and each JWT public key will need its own Secret. -The credential type is specified in the `kongCredType` field. In this -case we'll be using `jwt` and `acl`. You can create a secret using any other -method as well. - -The JWT signing algorithm is set in the `algorithm` field. The if using a -public key like this example it is stored in the `rsa_pulic_key` field. If you -are using a secret signing key, use the `secret` field. The last field to set -if you are using `RS256` or `ES256` is the `key` field. This should match the -`iss` field in the JWT you will be sending. You can check this value by -decoding your JWT over at [https://jwt.io](https://jwt.io) - -Since we are using the Secret resource, Kubernetes will encrypt and store the -JWT signing key and ACL group for us. - -### JWT signing key - -```bash -# create secret for jwt public key -$ kubectl create secret \ - generic app-admin-jwt \ - --from-literal=kongCredType=jwt \ - --from-literal=key="admin-issuer" \ - --from-literal=algorithm=RS256 \ - --from-literal=secret="dummy" \ - --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY----- - MIIBIjA.... 
- -----END PUBLIC KEY-----" - -# create a second secret with a different key -$ kubectl create secret \ - generic app-user-jwt \ - --from-literal=kongCredType=jwt \ - --from-literal=key="user-issuer" \ - --from-literal=algorithm=RS256 \ - --from-literal=secret="dummy" \ - --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY----- - qwerlkjqer.... - -----END PUBLIC KEY-----" -``` - -Validation requirements impose that even if the `secret` is not used for algorithm -`RS256` or `ES256` the field `secret` must be present, so put some dummy value for it. - -## Assign the credentials - -In order to for the ACL and JWT to be validated by Kong, the secrets will need -to be referenced by the KongConsumers we created earlier. Let's update those. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt -" | kubectl apply -f - -``` - -## Use the credential - -Now to use a JWT to pass authentication. Let's store the user and admin jwt's -in some environment variables. `USER_JWT` and `ADMIN_JWT`. If you are using -an identity provider, you should be able to login and get out a JWT from their -API. If you are generating your own, go through the process of generating your -own. - -Let's test the get route - -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true 
-X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - - -``` - -## Adding ACL's - -The JWT plugin doesn't provide the ability to authroize a given issuer to a -given ingress. To do this we need to use the ACL plugin. Let's create an admin -ACL config - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: admin-acl -plugin: acl -config: - whitelist: ['app-admin'] -" | kubectl apply -f - -``` - -Then let's create a user ACL config. We want our admin to be able to access -the same resources as the user, so let's make sure we include them in the -whitelist. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: plain-user-acl -plugin: acl -config: - whitelist: ['app-user','app-admin'] -" | kubectl apply -f - -``` - -Next let's create the secrets that will define the ACL groups. - -```bash -# create secrets for acl groups -$ kubectl create secret \ - generic app-admin-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-admin - -$ kubectl create secret \ - generic app-user-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-user -``` - -After we create the secrets, the consumers need to be updated to reference the -ACL credentials - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt - - app-admin-acl -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt - - app-user-acl -" | kubectl apply -f - -``` - -The last thing to configure is the ingress to use the new plguins. Note, if you -set more than one ACL plugin, the last one supplied will be the only one -evaluated. 
- -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt,plain-user-acl - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt,admin-acl - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Now let's test it. - -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-user", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post -HTTP/1.1 403 Forbidden -Date: Mon, 06 Apr 2020 07:11:59 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 45 -X-Kong-Response-Latency: 1 -Server: kong/2.0.2 - -{"message":"You cannot consume this service"} -``` - -The `plain-user` user is not in the `admin-acl` whitelist, and is therefore -unauthorized to access the resource - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 1156 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 07:20:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 4 -X-Kong-Proxy-Latency: 4 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": 
"localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} -``` diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/configuring-custom-entities.md b/app/kubernetes-ingress-controller/1.1.x/guides/configuring-custom-entities.md deleted file mode 100644 index 747286a87864..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/configuring-custom-entities.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Configuring Custom Entities ---- - -This is an **advanced-level** guide for users using custom entities in Kong. -Most users do not need to use this feature. - -Kong has in-built extensibility with its plugin architecture. -Plugins in Kong have a `config` property where users can store configuration -for any custom plugin and this suffices in most use cases. -In some use cases, plugins define custom entities to store additional -configuration outside the plugin instance itself. -This guide elaborates on how such custom entities can be used with the Kong -Ingress Controller. - -> Note: All entities shipped with Kong are supported by the -{{site.kic_product_name}}out of the box. This guide applies only if you have a -custom entity in your plugin. To check if your plugin contains a custom entity, -the source code will usually contain a `daos.lua` file. -Custom plugins have first-class support in the {{site.kic_product_name}} -via the `KongPlugin` CRD. -Please read [the custom plugin guide](/kubernetes-ingress-controller/{{page.release}}/guides/setting-up-custom-plugins/) instead -if you are only using Custom plugins. - -## Caveats - -- The feature discussed in this guide apply for DB-less deployments of Kong. - The feature is not supported for deployments where Kong is used with a - database or Kong is used in hybrid mode. - For these deployments, configure custom entities directly using Kong's Admin - API. -- Custom entities which have a foreign relation with other core entities in Kong - are not supported. Only entities which can exist by themselves and then - be referenced via plugin configuration are supported. - -## Creating a JSON representation of the custom entity - -In this section, we will learn how to create a JSON representation of -a custom entity. 
- -Suppose you have a custom entity with the following schema in your plugin source: - -```lua -{ - name = "xkcds", - primary_key = { "id" }, - cache_key = { "name" }, - endpoint_key = "name", - fields = { - { id = typedefs.uuid }, - { - name = { - type= "string", - required = true, - unique = true, - }, - }, - { - url = { - type = "string", - required = true, - }, - }, - { created_at = typedefs.auto_timestamp_s }, - { updated_at = typedefs.auto_timestamp_s }, - }, -} -``` - -An instance of such an entity would look like: - -```json -{ - "id": "385def6e-3059-4929-bb12-d205e97284c5", - "name": "Bobby Drop Tables", - "url": "https://xkcd.com/327/" -} -``` - -Multiple instances of such an entity are represented as follows: - -```json -{ - "xkcds": [ - { - "id": "385def6e-3059-4929-bb12-d205e97284c5", - "name": "bobby_tables", - "url": "https://xkcd.com/327/" - }, - { - "id": "d079a632-ac8d-4a9a-860c-71de82e8fc11", - "name": "compiling", - "url": "https://xkcd.com/303/" - } - ] -} -``` - -If you have more than one custom entities that you would like to configure -then you can create other entities by specifying the entity name at the root -level of the JSON as the key and then a JSON array containing the -custom entities as the value of the key. - -To configure custom entities in a DB-less instance of Kong, -you first need to create such a JSON representation of your entities. - -## Configuring the custom entity secret - -Once you have the JSON representation, we need to store the configuration -inside a Kubernetes Secret. -The following command assumes the filename to be `entities.json` but you can -use any other filename as well: - -```bash -$ kubectl create secret generic -n kong kong-custom-entities --from-file=config=entities.json -secret/kong-custom-entities created -``` - -Some things to note: -- The key inside the secret must be `config`. This is not configurable at the - moment. -- The secret must be accessible by the Ingress Controller. The recommended - practice here is to install the secret in the same namespace in which Kong - is running. - -## Configure the Ingress Controller - -Once you have the secret containing the custom entities configured, -you need to instruct the controller to read the secret and sync the custom -entities to Kong. - -To do this, you need to add the following environment variable to the -`ingress-ccontroller` container: - -```yaml -env: -- name: CONTROLLER_KONG_CUSTOM_ENTITIES_SECRET - value: kong/kong-custom-entities -``` - -This value of the environment variable takes the form of `/`. -You need to configure this only once. - -This instructs the controller to watch the above secret and configure Kong -with any custom entities present inside the secret. -If you change the configuration and update the secret with different entities, -the controller will dynamically fetch the updated secret and configure Kong. - -## Verification - -You can verify that the custom entity was actually created in Kong's memory -using the `GET /xkcds` (endpoint will differ based on the name of the entity) -on Kong's Admin API. 
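Before checking, make sure the controller Deployment has actually picked up the variable. One way to add it without editing the manifest by hand is `kubectl set env`; this is only a sketch, and the Deployment name (`ingress-kong`) and container name (`ingress-controller`) assume the all-in-one manifests and may differ in your install:

```bash
# Adds the variable to the controller container and triggers a rolling restart.
$ kubectl set env deployment/ingress-kong -n kong -c ingress-controller \
    CONTROLLER_KONG_CUSTOM_ENTITIES_SECRET=kong/kong-custom-entities
```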
-You can forward traffic from your local machine to the Kong Pod to access it: - -```bash -$ kubectl port-forward -n kong KONG-POD-NAME 8444:8444 -``` - -and in a separate terminal: - -```bash - $ curl -k https://localhost:8444/ -``` - -## Using the custom entity - -You can now use reference the custom entity in any of your custom plugin's -`config` object: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: random-xkcd-header -config: - xkcds: - - d079a632-ac8d-4a9a-860c-71de82e8fc11 -plugin: xkcd-header -``` diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/configuring-fallback-service.md b/app/kubernetes-ingress-controller/1.1.x/guides/configuring-fallback-service.md deleted file mode 100644 index 46d238e4b6ad..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/configuring-fallback-service.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Configuring a fallback service ---- - -This guide walks through how to setup a fallback service using Ingress -resource. The fallback service will receive all requests that don't -match against any of the defined Ingress rules. -This can be useful for scenarios where you would like to return a 404 page -to the end user if the user clicks on a dead link or inputs an incorrect URL. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup a simple HTTP service in the -cluster and proxy it. - -```bash -$ echo ' -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fallback-svc -spec: - replicas: 1 - selector: - matchLabels: - app: fallback-svc - template: - metadata: - labels: - app: fallback-svc - spec: - containers: - - name: fallback-svc - image: hashicorp/http-echo - args: - - "-text" - - "This is not the path you are looking for. 
- Fallback service" - ports: - - containerPort: 5678 ---- -apiVersion: v1 -kind: Service -metadata: - name: fallback-svc - labels: - app: fallback-svc -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: 5678 - protocol: TCP - name: http - selector: - app: fallback-svc -' | kubectl apply -f - -``` - -Result: - -```bash -deployment.apps/fallback-svc created -service/fallback-svc created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup a fallback service - -Let's deploy another sample service service: - -```bash -$ kubectl apply -f https://bit.ly/fallback-svc -deployment.extensions/fallback-svc created -service/fallback-svc created -``` - -Next, let's set up an Ingress rule to make it the fallback service -to send all requests to it that don't match any of our Ingress rules: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: fallback - annotations: - kubernetes.io/ingress.class: kong -spec: - backend: - serviceName: fallback-svc - servicePort: 80 -" | kubectl apply -f - -``` - -## Test it - -Now send a request with a request property that doesn't match against -any of the defined rules: - -```bash -$ curl $PROXY_IP/random-path -This is not the path you are looking for. - Fallback service -``` - -The above message comes from the fallback service that was deployed in the -last step. - -Create more Ingress rules, some complicated regex based ones and -see how requests that don't match any rules, are forwarded to the -fallback service. - -You can also use Kong's request-termination plugin on the `fallback` -Ingress resource to terminate all requests at Kong, without -forwarding them inside your infrastructure. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/configuring-health-checks.md b/app/kubernetes-ingress-controller/1.1.x/guides/configuring-health-checks.md deleted file mode 100644 index 875525104609..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/configuring-health-checks.md +++ /dev/null @@ -1,353 +0,0 @@ ---- -title: Setting up Active and Passive health checks ---- - -In this guide, we will go through steps necessary to setup active and passive -health checking using the {{site.kic_product_name}}. This configuration allows -Kong to automatically short-circuit requests to specific Pods that are -mis-behaving in your Kubernetes Cluster. - -> Please make sure to use {{site.kic_product_name}} >= 0.6 as the previous -versions contain a [bug](https://github.com/hbagdi/go-kong/issues/6). - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. 
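-
-Before moving on, it can help to confirm that the controller and proxy Pods
-are actually running. This is only a quick sanity check; it assumes the
-controller was installed into the `kong` namespace, which is what later
-examples in this guide use:
-
-```bash
-# List the Kong Pods and wait until they report Running and Ready
-$ kubectl get pods -n kong
-```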
- -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy any requests yet. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Observe the headers and you can see that Kong has proxied the request correctly. - -## Setup passive health checking - -Now, let's setup passive HTTP health-check for our service. -All health-checking is done at Service-level and not Ingress-level. - -Add the following KongIngress resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking created -``` - -Here, we are configuring Kong to short-circuit requests to a pod -if a pod throws 3 consecutive errors. - -Next, associate the KongIngress resource with `httpbin` service: - -```bash -$ kubectl patch svc httpbin -p '{"metadata":{"annotations":{"konghq.com/override":"demo-health-checking"}}}' -service/httpbin patched -``` - -Now, let's send some traffic to test if this works: - -Let's send 2 requests that represent a failure from upstream -and then send a request for 200. -Here we are using `/status/500` to simulate a failure from upstream. 
- -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Kong has not short-circuited because there were only two failures. -Let's send 3 requests and open the circuit, and then send a normal request. - -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 22:41:19 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} - -``` - -As we can see, Kong returns back a 503, representing that the service is -unavailable. Since we have only one pod of httpbin running in our cluster, -and that is throwing errors, Kong will not proxy anymore requests. - -Now we have a few options: - -- Delete the current httpbin pod; Kong will then proxy requests to the new - pod that comes in its place. -- Scale the httpbin deployment; Kong will then proxy requests to the new - pods and leave the short-circuited pod out of the loop. -- Manually change the pod health status in Kong using Kong's Admin API. - -These options highlight the fact that once a circuit is opened because of -errors, there is no way for Kong to close the circuit again. - -This is a feature which some services might need, where once a pod starts -throwing errors, manual intervention is necessary before that pod can -again handle requests. 
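-
-The last option, manually changing the health status, can be done through the
-Admin API's upstream target endpoints. The commands below are only a sketch:
-the upstream and target names are illustrative, so list the real ones first,
-and the Admin API is assumed to be reachable on port 8444 of the proxy Pod,
-as in the default deployment manifests:
-
-```bash
-# Forward the Admin API port from the Kong deployment
-$ kubectl port-forward -n kong deployment/ingress-kong 8444:8444
-
-# In another terminal: find the upstream and target generated for the httpbin Service
-$ curl -sk https://localhost:8444/upstreams
-$ curl -sk https://localhost:8444/upstreams/<upstream-name>/targets
-
-# Mark the target as healthy again
-$ curl -sk -X POST https://localhost:8444/upstreams/<upstream-name>/targets/<target>/healthy
-```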
-To get around this, we can introduce active health-check, where each instance -of Kong actively probes pods to figure out if they are healthy or not. - -## Setup active health checking - -Let's update our KongIngress resource to use active health-checks: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - active: - healthy: - interval: 5 - successes: 3 - http_path: /status/200 - type: http - unhealthy: - http_failures: 1 - interval: 5 - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking configured -``` - -Here, we are configuring Kong to actively probe `/status/200` every 5 seconds. -If a pod is unhealthy (from Kong's perspective), -3 successful probes will change the status of the pod to healthy and Kong -will again start to forward requests to that pod. - -Now, the requests should flow once again: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Let's trip the circuit again by sending three requests that will return -500s from httpbin: - -```bash -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -``` - -Now, sending the following request will fail for about 15 seconds, -the duration it will take active healthchecks to re-classify -the httpbin pod as healthy again. - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 23:17:47 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} -``` - -After 15 seconds, you will see: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As we can see, active health-checks automatically marked a pod as healthy -when passive health-checks marked it unhealthy. - -## Bonus - -Scale the `httpbin` and `ingress-kong` deployments and observe how -multiple pods change the outcome of the above demo. - -Read more about health-checks and ciruit breaker in Kong's -[documentation](/gateway/latest/reference/health-checks-circuit-breakers). diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/configuring-https-redirect.md b/app/kubernetes-ingress-controller/1.1.x/guides/configuring-https-redirect.md deleted file mode 100644 index b54fb8e41a2f..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/configuring-https-redirect.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Configuring https redirect ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -redirect HTTP request to HTTPS so that all communication -from the external world to your APIs and microservices is encrypted. 
- -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup HTTPS redirect - -To instruct Kong to redirect all HTTP requests matching this Ingress rule to -HTTPS, update its annotations to limit its protocols to HTTPS only and -issue a 301 redirect: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"https","konghq.com/https-redirect-status-code":"301"}}}' -ingress.extensions/demo patched -``` - -## Test it - -Now, making a plain-text HTTP request to Kong will result in a redirect -being issued from Kong: - -```bash -$ curl $PROXY_IP/foo/headers -I -HTTP/1.1 301 Moved Permanently -Date: Tue, 06 Aug 2019 18:04:38 GMT -Content-Type: text/html -Content-Length: 167 -Connection: keep-alive -Location: https://35.197.125.63/foo/headers -Server: kong/1.2.1 -``` - -The `Location` header will contain the URL you need to use for an HTTPS -request. Please note that this URL will be different depending on your -installation method. You can also grab the IP address of the load balancer -fronting Kong and send a HTTPS request to test it. - -Let's test it: - -```bash -$ curl -k https://35.197.125.63/foo/headers -{ - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Host": "35.197.125.63", - "User-Agent": "curl/7.54.0", - "X-Forwarded-Host": "35.197.125.63" - } -} -``` - -We can see that Kong correctly serves the request only on HTTPS protocol -and redirects the user if plaint-text HTTP protocol is used. 
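-
-You can also let cURL follow the redirect in a single step; the `-L` flag
-re-issues the request against the `Location` returned by Kong (the `-k` flag
-is explained just below):
-
-```bash
-$ curl -skL $PROXY_IP/foo/headers
-```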
-We had to use `-k` flag in cURL to skip certificate validation as the -certificate served by Kong is a self-signed one. -If you are serving this traffic via a domain that you control and have -configured TLS properties for it, then the flag won't -be necessary. - -If you have a domain that you control but don't have TLS/SSL certificates -for it, please check out out -[Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager) guide which can get TLS -certificates setup for you automatically. And it's free, thanks to -Let's Encrypt! diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/getting-started-istio.md b/app/kubernetes-ingress-controller/1.1.x/guides/getting-started-istio.md deleted file mode 100644 index 19ef1a26341d..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/getting-started-istio.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: Running the Kong Ingress Controller with Istio ---- - -In this guide, you will: -* Install Istio v1.6.7 and Kong in your cluster. -* Deploy an example Istio-enabled application (_bookinfo_). -* Deploy an `Ingress` customized with a `KongPlugin` for the example application. -* Make several requests to the sample application via Kong and Istio. -* See the performance metrics of the sample application, provided by Istio. - -### Prerequisites -For this guide, you will need: - -* A Kubernetes v1.15 (or newer) cluster which can pull container images from public registries. For example, you can use: - * A managed Kubernetes cluster (AWS EKS, Google Cloud GKE, Azure AKS). - * Minikube. - * `microk8s` with the `dns` addon enabled. -* `kubectl` with admin access to the cluster. - -### Download Istio - -Download the Istio bundle at version 1.6.7: - -```console -$ curl -L https://istio.io/downloadIstio | env ISTIO_VERSION=1.6.7 sh - -... -... -Istio 1.6.7 Download Complete! - -Istio has been successfully downloaded into the istio-1.6.7 folder on your system. -... -... -``` - -### Install Istio Operator - -Invoke `istioctl` to deploy the Istio Operator to the Kubernetes cluster: - -```console -$ ./istio-1.6.7/bin/istioctl operator init -Using operator Deployment image: docker.io/istio/operator:1.6.7 -✔ Istio operator installed -✔ Installation complete -``` - -### Deploy Istio using Operator - -Deploy Istio using Istio Operator: - -```console -$ kubectl create namespace istio-system -namespace/istio-system created -``` -```console -$ kubectl apply -f - < 8000 -Forwarding from [::1]:8080 -> 8000 -... -``` - -Navigate your web browser to `http://localhost:8080/` You should be able to see a bookstore web application. Click -through any available links several times. As you hit 30 requests per minute (for example, by holding down the "Refresh" -key combination, e.g. `` or ``), you should obtain a `Kong Error - API rate limit exceeded` response. - -### See the connection graph in Kiali - -Connect to Kiali (the Istio dashboard): - -```console -$ # Keep the command below running in the background -$ kubectl port-forward service/kiali 20001:20001 -n istio-system -Forwarding from 127.0.0.1:20001 -> 20001 -Forwarding from [::1]:20001 -> 20001 -... -``` - -* Navigate your web browser to `http://localhost:20001/`. -* Log in using the default credentials (`admin`/`admin`). -* Choose _Workloads_ from the menu on the left. -* Select `my-istio-app` in the _Namespace_ drop-down menu. -* Click the _productpage-v1_ service name. 
-* Click the three dots button in the top-right corner of _Graph Overview_ and click _Show full graph_. -* Select `kong-istio` alongside `my-istio-app` in the _Namespace_ diagram. -* Observe a connection graph spanning from `example-kong-kong-proxy` through `productpage-v1` to the other sample -application services such as `ratings-v1` and `details-v1`. - -### See the metrics in Grafana - -Connect to Grafana (a dashboard frontend for Prometheus which has been deployed with Istio): - -```console -$ # Keep the command below running in the background -$ kubectl port-forward service/grafana 3000:3000 -n istio-system -Forwarding from 127.0.0.1:3000 -> 3000 -Forwarding from [::1]:3000 -> 3000 -... -``` - -* Navigate your web browser to `http://localhost:3000/`. -* Expand the dashboard selection drop-down menu from the top of the screen. Expand the `istio` directory and choose the -_Istio Workload Dashboard_ from the list. -* Choose _Namespace: my-istio-app_ and _Workload: productpage-v1_ from the drop-downs. -* Choose a timespan in the top-right of the page to include the time when you made requests to the sample application (e.g. _Last 1 hour_). -* Observe the incoming and outgoing request graphs reflecting actual requests from Kong to `productpage-v1`, and from `productpage-v1` to its backends. - -Note that the requests from the web browser to Kong are not reflected in inbound stats of `example-kong-kong-proxy` -because we've issued these requests by `kubectl port-forward`, thus bypassing the Istio proxy sidecar in Kong. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/getting-started.md b/app/kubernetes-ingress-controller/1.1.x/guides/getting-started.md deleted file mode 100644 index 2de9ca9d32fc..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/getting-started.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Getting started with the Kong Ingress Controller ---- - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return back -a HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.1.2 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy the request yet. - -## Set up an echo-server - -Setup an echo-server application to demonstrate how -to use the {{site.kic_product_name}}: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -This application just returns information about the -pod and details from the HTTP request. 
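-
-If you want to double-check that both objects were created before setting up
-any routing, you can list them together (the Deployment and Service share the
-name `echo`):
-
-```bash
-$ kubectl get deployment,service echo
-```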
- -## Basic proxy - -Create an Ingress rule to proxy the echo-server created previously: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Fri, 21 Jun 2019 17:12:49 GMT -Server: echoserver -X-Kong-Upstream-Latency: 0 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 - - - -Hostname: echo-758859bbfb-txt52 - -Pod Information: - node name: minikube - pod name: echo-758859bbfb-txt52 - pod namespace: default - pod IP: 172.17.0.14 -<-- clipped --> -``` - -If everything is deployed correctly, you should see the above response. -This verifies that Kong can correctly route traffic to an application running -inside Kubernetes. - -## Using plugins in Kong - -Setup a KongPlugin resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -kongplugin.configuration.konghq.com/request-id created -``` - -Create a new Ingress resource which uses this plugin: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - konghq.com/plugins: request-id - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -The above resource directs Kong to execute the request-id plugin whenever -a request is proxied matching any rule defined in the resource. - -Send a request to Kong: - -```bash -$ curl -i -H "Host: example.com" $PROXY_IP/bar/sample -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:09:02 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 - - - -Hostname: echo-758859bbfb-cnfmx - -Pod Information: - node name: minikube - pod name: echo-758859bbfb-cnfmx - pod namespace: default - pod IP: 172.17.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=172.17.0.2 - method=GET - real path=/bar/sample - query= - request_version=1.1 - request_scheme=http - request_uri=http://example.com:8080/bar/sample - -Request Headers: - accept=*/* - connection=keep-alive - host=example.com - my-request-id=7250803a-a85a-48da-94be-1aa342ca276f#6 - user-agent=curl/7.54.0 - x-forwarded-for=172.17.0.1 - x-forwarded-host=example.com - x-forwarded-port=8000 - x-forwarded-proto=http - x-real-ip=172.17.0.1 - -Request Body: - -no body in request- -``` - -The `my-request-id` can be seen in the request received by echo-server. -It is injected by Kong as the request matches one -of the Ingress rules defined in `demo-example-com` resource. - -## Using plugins on Services - -Kong Ingress allows plugins to be executed on a service level, meaning -Kong will execute a plugin whenever a request is sent to a specific k8s service, -no matter which Ingress path it came from. 
- -Create a KongPlugin resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: rl-by-ip -config: - minute: 5 - limit_by: ip - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongplugin.configuration.konghq.com/rl-by-ip created -``` - -Next, apply the `konghq.com/plugins` annotation on the Kubernetes Service -that needs rate-limiting: - -```bash -kubectl patch svc echo \ - -p '{"metadata":{"annotations":{"konghq.com/plugins": "rl-by-ip\n"}}}' -``` - -Now, any request sent to this service will be protected by a rate-limit -enforced by Kong: - -```bash -$ curl -I $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:25:49 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 2 -X-Kong-Upstream-Latency: 0 -X-Kong-Proxy-Latency: 4 -Via: kong/1.1.2 - -$ curl -I -H "Host: example.com" $PROXY_IP/bar/sample -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:28:30 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 2 -Via: kong/1.1.2 -``` - -## Result - -This guide sets up the following configuration: - -```text -HTTP requests with /foo -> Kong enforces rate-limit -> echo server - -HTTP requests with /bar -> Kong enforces rate-limit + -> echo-server - on example.com injects my-request-id header -``` diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/overview.md b/app/kubernetes-ingress-controller/1.1.x/guides/overview.md deleted file mode 100644 index ca89c71096df..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/overview.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Guides ---- - -Follow one of the guides to learn more about how to use -the {{site.kic_product_name}}: - -- [Getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started/) with the {{site.kic_product_name}} -- [Getting started using Istio](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started-istio/) with the {{site.kic_product_name}} and Istio -- [Using KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) - This guide walks through setting up plugins in Kong using a declarative - approach. -- [Using KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource/) - This guide explains how the KongIngress resource can be used to change Kong - specific settings like load-balancing, health-checking and proxy behaviour. -- [Using KongConsumer and Credential Resources](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource/) - This guide walks through how Kubernetes native declarative configuration - can be used to dynamically provision credentials for authentication purposes - in the Ingress layer. -- [Using JWT and ACL KongPlugin resources](/kubernetes-ingress-controller/{{page.release}}/guides/configure-acl-plugin/) - This guides walks you through configuring the JWT plugin and ACL plugin for - authentication purposes at the Ingress layer -- [Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager/) - This guide walks through how to use cert-manager along with Kong Ingress - Controller to automate TLS certificate provisioning and using them - to encrypt your API traffic. 
-- [Configuring a fallback service](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-fallback-service/) - This guide walks through how to setup a fallback service using Ingress - resource. The fallback service will receive all requests that don't - match against any of the defined Ingress rules. -- [Using external service](/kubernetes-ingress-controller/{{page.release}}/guides/using-external-service/) - This guide shows how to expose services running outside Kubernetes via Kong, - using [External Name](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) - Services in Kubernetes. -- [Configuring HTTPS redirects for your services](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-https-redirect/) - This guide walks through how to configure the {{site.kic_product_name}} to - redirect HTTP request to HTTPS so that all communication - from the external world to your APIs and microservices is encrypted. -- [Using Redis for rate-limiting](/kubernetes-ingress-controller/{{page.release}}/guides/redis-rate-limiting/) - This guide walks through how to use Redis for storing rate-limit information - in a multi-node Kong deployment. -- [Integrate the {{site.kic_product_name}} with Prometheus/Grafana](/kubernetes-ingress-controller/{{page.release}}/guides/prometheus-grafana/) - This guide walks through the steps of how to deploy the {{site.kic_product_name}} - and Prometheus to obtain metrics for the traffic flowing into your - Kubernetes cluster. -- [Configuring circuit-breaker and health-checking](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-health-checks/) - This guide walks through the usage of Circuit-breaking and health-checking - features of the {{site.kic_product_name}}. -- [Setting up custom plugin](/kubernetes-ingress-controller/{{page.release}}/guides/setting-up-custom-plugins/) - This guide walks through - installation of a custom plugin into Kong using - ConfigMaps and Volumes. -- [Using ingress with gRPC](/kubernetes-ingress-controller/{{page.release}}/guides/using-ingress-with-grpc/) - This guide walks through how to use the {{site.kic_product_name}} with gRPC. -- [Setting up upstream mTLS](/kubernetes-ingress-controller/{{page.release}}/guides/upstream-mtls/) - This guide gives an overview of how to setup mutual TLS authentication - between Kong and your upstream server. -- [Preserving Client IP address](/kubernetes-ingress-controller/{{page.release}}/guides/preserve-client-ip/) - This guide gives an overview of different methods to preserve the Client - IP address. -- [Using KongClusterPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) - This guide walks through setting up plugins that can be shared across - Kubernetes namespaces. -- [Using Kong with Knative](/kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/) - This guide gives an overview of how to setup Kong as the Ingress point - for Knative workloads. -- [Exposing TCP-based service](/kubernetes-ingress-controller/{{page.release}}/guides/using-tcpingress/) - This guide gives an overview of how to use TCPIngress resource to expose - non-HTTP based services outside a Kubernetes cluster. -- [Using mtls-auth plugin](/kubernetes-ingress-controller/{{page.release}}/guides/using-mtls-auth-plugin/) - This guide gives an overview of how to use `mtls-auth` plugin and CA - certificates to authenticate requests using client certificates. 
-- [Using OpenID-connect plugin](/kubernetes-ingress-controller/{{page.release}}/guides/using-oidc-plugin/) - This guide walks through steps necessary to set up OIDC authentication. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/preserve-client-ip.md b/app/kubernetes-ingress-controller/1.1.x/guides/preserve-client-ip.md deleted file mode 100644 index 0a76e545bbf5..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/preserve-client-ip.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Preserving Client IP Address ---- - -Kong is usually deployed behind a Load Balancer (using a -Kubernetes Service of type `LoadBalancer`). This can result -in loss of actual Client IP address and Kong observing the IP address -of the Load Balancer as the client IP address. This guide lays -out different methods of solving this problem. - -Preserving the Client IP address in cloud behind Load-Balancers requires -configuration that is be specific to your use-case, cloud provider -and other architecture details. -[This document](https://kubernetes.io/docs/tutorials/services/source-ip/) -provides details on how networking works inside Kubernetes and explains -in detail of how various methods describe later in this document work. -It is recommended that you give this a read. - -Following methods are possible to preserve Client IP address: - -## ExternalTrafficPolicy: Local - -As explained in -[Kubernetes docs](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip), -setting `service.spec.externalTrafficPolicy` to `Local` preserves the Client -IP address. You don't need to change any configuration in Kong if you -are using this method to preserve Client IP address. - -Please note that this is not supported by all of the public Cloud providers. - -## Proxy Protocol - -If you have an L4 Load Balancer that supports Proxy Protocol, and you're -terminating TCP connections at the Load Balancer before passing traffic -onward to Kong, then you can configure Kong to pick up the Client IP -address via this protocol. - -Once you have configured the Load Balancer to use Proxy Protocol, you -need to set the following environment variables in Kong for Kong to -receive the Client IP from the Proxy Protocol header. - -- [`KONG_TRUSTED_IPS`](/gateway/latest/reference/configuration/#trusted_ips) -- [`KONG_PROXY_LISTEN`](/gateway/latest/reference/configuration/#proxy_listen) -- [`KONG_REAL_IP_HEADER`](/gateway/latest/reference/configuration/#real_ip_header) - -For example: - -``` -KONG_TRUSTED_IPS=0.0.0.0/0,::/0 # This trusts all IPs -KONG_PROXY_LISTEN="0.0.0.0:8000 proxy_protocol, 0.0.0.0:8443 ssl proxy_protocol" -KONG_REAL_IP_HEADER=proxy_protocol -``` - -## HTTP headers - -If you are using an L7 Load Balancer, i.e. HTTP requests are being terminated -at the Load Balancer, then you need to use `x-forwarded-for` or `x-real-ip` -header to preserve details of the connection between the Client and Load Balancer. 
- -You should configure the Load Balancer to inject these headers, and then -you need to set the following environment variables in Kong for Kong to pick up -the Client IP address from HTTP headers: - -- [`KONG_TRUSTED_IPS`](/gateway/latest/reference/configuration/#trusted_ips) -- [`KONG_REAL_IP_HEADER`](/gateway/latest/reference/configuration/#real_ip_header) -- Optional [`KONG_REAL_IP_RECURSIVE`](/gateway/latest/reference/configuration/#real_ip_recursive) - -Please note that if you are using an L7 Load Balancer with Kong, -you cannot use the `certificates` feature in Kong as the TLS session is -already established between the Client and Load Balancer. - -## Cloud-provider specific details - -For the major public clouds, follow are some additional -details that can help you preserve the client IP address: - -### GKE - -You can use `ExternalTrafficPolicy: Local` to preserve the Client IP address. - -### AKS - -You can use `ExternalTrafficPolicy: Local` to preserve the Client IP address. - -### EKS - -You have two options: - -- L4 Load Balancer - In this case, you need to use the Proxy Protocol method to preserve Client IP - address. -- L7 Load Balancer - In this case, you need to use the HTTP headers method to preserve the Client - IP address. - -The recommend Load Balancer type for AWS is NLB. -You can choose the type of Load Balancer using the following annotation: - -``` -service.beta.kubernetes.io/aws-load-balancer-type: nlb -``` - -Other useful annotations for AWS are: - -``` -service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp -service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*' -``` - -A complete list can be found -[here](https://gist.github.com/mgoodness/1a2926f3b02d8e8149c224d25cc57dc1). diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/prometheus-grafana.md b/app/kubernetes-ingress-controller/1.1.x/guides/prometheus-grafana.md deleted file mode 100644 index bf6d82958273..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/prometheus-grafana.md +++ /dev/null @@ -1,338 +0,0 @@ ---- -title: Integrate the Kong Ingress Controller with Prometheus/Grafana ---- - -The {{site.kic_product_name}} can give you visibility not only into how Kong is -performing but also gives visibilty into -how the services in your Kubernetes cluster are responding to the -inbound traffic. - -This how-to guide walks through the steps of how to configure Kong and -Prometheus to collect metrics from the {{site.kic_product_name}}. - -> Note: This guide was originally posted on Kong Inc's blog: -[https://konghq.com/blog/observability-kubernetes-kong/](https://konghq.com/blog/observability-kubernetes-kong) - -## Prerequisites - -You’ll need a few things before we can start: - -- **Kubernetes cluster**: You can use Minikube or a GKE cluster for the - purpose of this tutorial. We are running a GKE Kubernetes cluster v1.12.x. -- **Helm**: We will be using [Helm](https://helm.sh/) - to install all of our components. - Tiller should be installed on your k8s cluster and - Helm CLI should be available on your workstation. - You can follow Helm’s quickstart guide to set up helm. - -Once you have Kubernetes and Helm set up, please proceed. - -Caution: Settings here are tweaked to keep this guide simple. -These settings are not meant for production usage. - -## Install Prometheus and Grafana - -If you already have Prometheus and Grafana installed on your Kubernetes cluster, -you can skip these steps. 
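-
-If you are not sure whether a monitoring stack is already present, a quick
-(and admittedly rough) way to check is to look for matching Pods across all
-namespaces:
-
-```bash
-$ kubectl get pods --all-namespaces | grep -iE 'prometheus|grafana'
-```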
- -### Prometheus - -First, we will install Prometheus with a -scrape interval of 10 seconds to have fine-grained data points for all metrics. -We’ll install both Prometheus and Grafana in a dedicated `monitoring` namespace. - -To install Prometheus, execute the following: - -```bash -$ kubectl create namespace monitoring -$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -$ helm install prometheus prometheus-community/prometheus --namespace monitoring --values https://bit.ly/2RgzDtg --version 11.0.3 -``` - -### Grafana - -Grafana is installed with the following values for its Helm chart -(see comments for explanation): - -```yaml -persistence: - enabled: true # enable persistence using Persistent Volumes -datasources: - datasources.yaml: - apiVersion: 1 - Datasources: # configure Grafana to read metrics from Prometheus - - name: Prometheus - type: prometheus - url: http://prometheus-server # Since Prometheus is deployed in - access: proxy # same namespace, this resolves - # to the Prometheus Server we installed previous - isDefault: true # The default data source is Prometheus - -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'default' # Configure a dashboard provider file to - orgId: 1 # put Kong dashboard into. - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/default -dashboards: - default: - kong-dash: - gnetId: 7424 # Install the following Grafana dashboard in the - revision: 5 # instance: https://grafana.com/dashboards/7424 - datasource: Prometheus -``` - -To install Grafana, execute the following: - -```bash -$ helm repo add grafana https://grafana.github.io/helm-charts -$ helm install grafana grafana/grafana --namespace monitoring --values http://bit.ly/2FuFVfV --version 5.0.8 -``` - -## Install Kong - -We will use Kong's Helm chart to install Kong -but you can also use plain manifests for this purpose. - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -$ kubectl create namespace kong -$ helm install mykong kong/kong --namespace kong --values https://bit.ly/2UAv0ZE -``` - -### Enable Prometheus plugin in Kong - -We will enable the Promtheus plugin in Kong at the global level, meaning -each request that flows into the Kubernetes cluster gets tracked in Prometheus: - -```bash -$ echo 'apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: prometheus - annotations: - kubernetes.io/ingress.class: kong - labels: - global: "true" -plugin: prometheus -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/prometheus created -``` - -## Set Up Port Forwards - -Now, we will gain access to the components we just deployed. -In a production environment, you would have a Kubernetes Service with -an external IP or load balancer, which would allow you to access -Prometheus, Grafana, and Kong. -For demo purposes, we will set up port-forwarding using kubectl to get access. -It is not advisable to do this in production. 
- -Open a new terminal and execute the following commands: - -```bash -POD_NAME=$(kubectl get pods --namespace monitoring -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace monitoring port-forward $POD_NAME 9090 & - -# You can access Prometheus in your browser at localhost:9090 - -POD_NAME=$(kubectl get pods --namespace monitoring -l "app.kubernetes.io/instance=grafana" -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace monitoring port-forward $POD_NAME 3000 & - -# You can access Grafana in your browser at localhost:3000 - -POD_NAME=$(kubectl get pods --namespace kong -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace kong port-forward $POD_NAME 8000 & - -# Kong proxy port is now your localhost 8000 port -# We are using plain-text HTTP proxy for this purpose of -# demo. -# You can also use the LoadBalancer IP address and not set up this -# port-forwarding if you are running in a cloud environment. -``` - -## Access Grafana Dashboard - -To access Grafana, you need to get the password for the admin user. - -Execute the following to read the password and take note of it: - -```bash -kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo -``` - -Now, browse to [http://localhost:3000](http://localhost:3000) and -fill in username as “admin” and password as what you just noted above. -You should be logged in to Grafana and Kong’s Grafana Dashboard -should already be installed for you. - -## Setup Services - -We have all the components for monitoring installed, -we will now spin up some services for demo purposes and setup Ingress -routing for them. - -### Install Services - -We will set up three services: billing, invoice, and comments. -Execute the following to spin these services up: - -```bash -kubectl apply -f https://gist.githubusercontent.com/hbagdi/2d8ef66fe22cb99e1514f410f992268d/raw/a03d789b70c46ccd0b99d9f1ed838dc21419fc33/multiple-services.yaml -``` - -### Install Ingress for the Services - -Next, once the services are up and running, we will create Ingress -routing rules in Kubernetes. -This will configure Kong to proxy traffic destined for these services correctly. - -Execute the following: - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: sample-ingresses - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /billing - backend: - serviceName: billing - servicePort: 80 - - path: /comments - backend: - serviceName: comments - servicePort: 80 - - path: /invoice - backend: - serviceName: invoice - servicePort: 80 -' | kubectl apply -f - -``` - -## Let’s Create Some Traffic - -We’re done configuring our services and proxies. -Time to see if our setup works. -Execute the following in a new terminal: - -```bash -while true; -do - curl http://localhost:8000/billing/status/200 - curl http://localhost:8000/billing/status/501 - curl http://localhost:8000/invoice/status/201 - curl http://localhost:8000/invoice/status/404 - curl http://localhost:8000/comments/status/200 - curl http://localhost:8000/comments/status/200 - sleep 0.01 -done -``` - -Since we have already enabled Prometheus plugin in Kong to -collect metrics for requests proxied via Kong, -we should see metrics coming through in the Grafana dashboard. - -You should be able to see metrics related to the traffic flowing -through our services. 
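-
-If you would rather confirm from the command line that data points are
-arriving before exploring the dashboards, you can query Prometheus directly
-over the port-forward set up earlier. This is just a quick sanity check using
-the `kong_http_status` counter exposed by the Prometheus plugin:
-
-```bash
-$ curl -sG http://localhost:9090/api/v1/query \
-    --data-urlencode 'query=sum(rate(kong_http_status[1m])) by (service)'
-```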
-Try tweaking the above script to send different traffic patterns -and see how the metrics change. -The upstream services are httpbin instances, meaning you can use -a variety of endpoints to shape your traffic. - -## Metrics collected - -### Request Latencies of Various Services - -![Request latencies](/assets/images/products/kubernetes-ingress-controller/request-latencies.png) - -Kong collects latency data of how long your services take to respond to -requests. One can use this data to alert the on-call engineer if the latency -goes beyond a certain threshold. For example, let’s say you have an SLA -that your APIs will respond with latency of less than 20 millisecond -for 95% of the requests. -You could configure Prometheus to alert based on the following query: - -```text -histogram_quantile(0.95, sum(rate(kong_latency_bucket{type="request"}[1m])) by (le,service)) > 20 -``` - -The query calculates the 95th percentile of the the total request -latency (or duration) for all of your services and alerts you if it is more -than 20 milliseconds. -The “type” label in this query is “request”, which tracks the latency -added by Kong and the service. -You can switch this to “upstream” to track latency added by the service only. -Prometheus is highly flexible and well documented, so we won’t go into -details of setting up alerts here, but you’ll be able to find them -in the Prometheus documentation. - -### Kong Proxy Latency - -![Proxy latencies](/assets/images/products/kubernetes-ingress-controller/proxy-latencies.png) - -Kong also collects metrics about its performance. -The following query is similar to the previous one but gives -us insight into latency added by Kong: - -```text -histogram_quantile(0.90, sum(rate(kong_latency_bucket{type="kong"}[1m])) by (le,service)) > 2 -``` - -### Error Rates - -![Error rates](/assets/images/products/kubernetes-ingress-controller/error-rates.png) - -Another important metric to track is the rate of errors and requests -your services are serving. -The time series `kong_http_status` collects HTTP status code metrics -for each service. - -This metric can help you track the rate of errors for each of your service: - -```text -sum(rate(kong_http_status{code=~"5[0-9]{2}"}[1m])) by (service) -``` - -You can also calculate the percentage of requests in any duration -that are errors. Try to come up with a query to derive that result. - -Please note that all HTTP status codes are indexed, meaning you could use -the data to learn about your typical traffic pattern and identify problems. -For example, a sudden rise in 404 response codes could be indicative -of client codes requesting an endpoint that was removed in a recent deploy. - -### Request Rate and Bandwidth - -![Request rates](/assets/images/products/kubernetes-ingress-controller/request-rate.png) - -One can derive the total request rate for each of your services or -across your Kubernetes cluster using the `kong_http_status` time series. - -![Bandwidth](/assets/images/products/kubernetes-ingress-controller/bandwidth.png) - -Another metric that Kong keeps track of is the amount of -network bandwidth (`kong_bandwidth`) being consumed. -This gives you an estimate of how request/response sizes -correlate with other behaviors in your infrastructure. - -You now have metrics for the services running inside your Kubernetes cluster -and have much more visibility into your applications, without making -any modifications in your services. 
-You can use Alertmanager or Grafana to now configure alerts based on -the metrics observed and your SLOs. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/redis-rate-limiting.md b/app/kubernetes-ingress-controller/1.1.x/guides/redis-rate-limiting.md deleted file mode 100644 index 64f13251f006..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/redis-rate-limiting.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Using Redis for rate-limiting ---- - -Kong can rate-limit your traffic without any external dependency. -In such a case, Kong stores the request counters in-memory -and each Kong node applies the rate-limiting policy independently. -There is no synchronization of information being done in this case. -But if Redis is available in your cluster, Kong -can take advantage of it and synchronize the rate-limit information -across multiple Kong nodes and enforce a slightly different rate-limiting -policy. - -This guide walks through the steps of using Redis for rate-limiting in -a multi-node Kong deployment. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. 
- -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Set up rate-limiting - -We will start by creating a global rate-limiting policy: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit created -``` - -Here we are configuring the {{site.kic_product_name}} to rate-limit traffic from -any client to 5 requests per minute, and we are applying this policy in a -global sense, meaning the rate-limit will apply across all services. - -You can set this up for a specific Ingress or a specific service as well, -please follow [using KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) -guide on steps for doing that. - -Next, test the rate-limiting policy by executing the following command -multiple times and observe the rate-limit headers in the response: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -As there is a single Kong instance running, Kong correctly imposes -the rate-limit and you can make only 5 requests in a minute. - -## Scale the controller to multiple pods - -Now, let's scale up the {{site.kic_product_name}} deployment to 3 pods, for -scalability and redundancy: - -```bash -$ kubectl scale --replicas 3 -n kong deployment ingress-kong -deployment.extensions/ingress-kong scaled -``` - -It will take a couple minutes for the new pods to start up. -Once the new pods are up and running, test the rate-limiting policy by -executing the following command and observing the rate-limit headers: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -You will observe that the rate-limit is not consistent anymore -and you can make more than 5 requests in a minute. - -To understand this behavior, we need to understand how we have configured Kong. -In the current policy, each Kong node is tracking a rate-limit in-memory -and it will allow 5 requests to go through for a client. -There is no synchronization of the rate-limit information across Kong nodes. -In use-cases where rate-limiting is used as a protection mechanism and to -avoid over-loading your services, each Kong node tracking its own counter -for requests is good enough as a malicious user will hit rate-limits on all -nodes eventually. 
-Or if the load-balancer in-front of Kong is performing some -sort of deterministic hashing of requests such that the same Kong node always -receives the requests from a client, then we won't have this problem at all. - -In some cases, a synchronization of information that each Kong node maintains -in-memory is needed. For that purpose, Redis can be used. -Let's go ahead and set this up next. - -## Deploy Redis to your Kubernetes cluster - -First, we will deploy redis in our Kubernetes cluster: - -```bash -$ kubectl apply -n kong -f https://bit.ly/k8s-redis -deployment.apps/redis created -service/redis created -``` - -Once this is deployed, let's update our KongClusterPlugin configuration to use -Redis as a datastore rather than each Kong node storing the counter information -in-memory: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - policy: redis - redis_host: redis -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit configured -``` - -Notice, how the `policy` is now set to `redis` and we have configured Kong -to talk to the `redis` server available at `redis` DNS name, which is the -Redis node we deployed earlier. - -## Test it - -Now, if you go ahead and execute the following commands, you should be able -to make only 5 requests in a minute: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -This guide shows how to use Redis as a data-store for rate-limiting plugin, -but this can be used for other plugins which support Redis as a data-store -like proxy-cache. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/setting-up-custom-plugins.md b/app/kubernetes-ingress-controller/1.1.x/guides/setting-up-custom-plugins.md deleted file mode 100644 index bb56a4b3b174..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/setting-up-custom-plugins.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: Setting up custom plugin in Kubernetes environment ---- - -This guide goes through steps on installing a custom plugin -in Kong without using a Docker build. - -## Prepare a directory with plugin code - -First, we need to create either a ConfigMap or a Secret with -the plugin code inside it. -If you would like to install a plugin which is available as -a rock from Luarocks, then you need to download it, unzip it and create a -ConfigMap from all the Lua files of the plugin. - -We are going to setup a dummy plugin next. -If you already have a real plugin, you can skip this step. - -```shell -$ mkdir myheader && cd myheader -$ echo 'local MyHeader = {} - -MyHeader.PRIORITY = 1000 - -function MyHeader:header_filter(conf) - -- do custom logic here - kong.response.set_header("myheader", conf.header_value) -end - -return MyHeader -' > handler.lua - -$ echo 'return { - name = "myheader", - fields = { - { config = { - type = "record", - fields = { - { header_value = { type = "string", default = "roar", }, }, - }, - }, }, - } -} -' > schema.lua -``` - -Once we have our plugin code available in a directory, -the directory should look something like this: - -```shell -$ tree myheader -myheader -├── handler.lua -└── schema.lua - -0 directories, 2 files -``` - -You might have more files inside the directory as well. - -## Create a ConfigMap or Secret with the plugin code - -Next, we are going to create a ConfigMap or Secret based on the plugin -code. 
- -Please ensure that this is created in the same namespace as the one -in which Kong is going to be installed. - -```shell -# using ConfigMap; replace `myheader` with the name of your plugin -$ kubectl create configmap kong-plugin-myheader --from-file=myheader -n kong -configmap/kong-plugin-myheader created - -# OR using Secret -$ kubectl create secret generic -n kong kong-plugin-myheader --from-file=myheader -secret/kong-plugin-myheader created -``` - -## Modify configuration - -Next, we need to update Kong's Deployment to load our custom plugin. - -Based on your installation method, this step will differ slightly. -The next section explains what changes are necessary. - -### YAML - -The following patch is necessary to load the plugin. -Notable changes: -- The plugin code is mounted into the pod via `volumeMounts` and `volumes` - configuration property. -- `KONG_PLUGINS` environment variable is set to include the custom plugin - along with all the plugins that come in Kong by default. -- `KONG_LUA_PACKAGE_PATH` environment variable directs Kong to look - for plugins in the directory where we are mounting them. - -If you have multiple plugins, simply mount multiple -ConfigMaps and include the plugin name in the `KONG_PLUGINS` -environment variable. - -> Please note that if your plugin code involves database - migration then you need to include the below patch to pod definition of your - migration Job as well. - -Please note that the below is not a complete definition of -the Deployment but merely a strategic patch which can be applied to -an existing Deployment. - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ingress-kong - namespace: kong -spec: - template: - spec: - containers: - - name: proxy - env: - - name: KONG_PLUGINS - value: bundled,myheader - - name: KONG_LUA_PACKAGE_PATH - value: "/opt/?.lua;;" - volumeMounts: - - name: kong-plugin-myheader - mountPath: /opt/kong/plugins/myheader - volumes: - - name: kong-plugin-myheader - configMap: - name: kong-plugin-myheader -``` - -### Helm chart - -With Helm, this is as simple as adding the following values to -your `values.yaml` file: - -```yaml -# values.yaml -plugins: - configMaps: # change this to 'secrets' if you created a secret - - name: kong-plugin-myheader - pluginName: myheader -``` - -The chart automatically configures all the environment variables based on the -plugins you inject. - -Please ensure that you add in other configuration values -you might need for your installation to work. - -### Deploy - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Once, you have all the pieces in place, you are ready -to deploy the {{site.kic_product_name}}: - -```shell -# using YAML or kustomize -kustomize build github.com/hbagdi/yaml/kong/kong-custom-plugin | kubectl apply -f - - -# or helm -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong --values values.yaml - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false --values values.yaml -``` - -Once you have setup Kong with the custom plugin installed, you can use it -like any other plugin. 
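To confirm that the custom plugin was actually wired into the proxy environment, you can optionally inspect the Deployment's environment variables. This is a sketch of an optional check, assuming the YAML-based install and the `ingress-kong` Deployment name used in the patch above; adjust the names if your installation differs.

```bash
# Should print the KONG_PLUGINS entry followed by its value, e.g. "bundled,myheader"
kubectl get deployment ingress-kong -n kong -o yaml | grep -A1 KONG_PLUGINS
```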
-
-First, create a `KongPlugin` custom resource:
-
-```bash
-echo "
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: my-custom-plugin
-config:
-  header_value: "my first plugin"
-plugin: myheader
-" | kubectl apply -f -
-```
-
-and then annotate an Ingress or Service resource to instruct
-Kong on when to execute the plugin:
-
-```yaml
-konghq.com/plugins: my-custom-plugin
-```
-
-Once you have Kong up and running, configure your
-custom plugin via [KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/).
-
-
-### Plugins in other languages
-
-When deploying custom plugins in other languages, especially Golang, the built binary is larger than
-the size limit of a ConfigMap. In such cases, consider using an init container to pull large binaries from
-remotes like S3 buckets, or build a custom image that includes plugin runtimes and the plugin itself.
-
-To read more about building a custom image, see
-[use external plugins in container and Kubernetes](/gateway/latest/reference/external-plugins/#use-external-plugins-in-container-and-kubernetes).
diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/upstream-mtls.md b/app/kubernetes-ingress-controller/1.1.x/guides/upstream-mtls.md
deleted file mode 100644
index 7a6e50c40656..000000000000
--- a/app/kubernetes-ingress-controller/1.1.x/guides/upstream-mtls.md
+++ /dev/null
@@ -1,104 +0,0 @@
----
-title: Using mTLS with Kong
----
-
-This guide walks through how to set up Kong to perform mutual TLS
-authentication with an upstream service.
-
-> Please note that this guide walks through mTLS configuration between
-Kong and a Service and not Kong and a client or consumer.
-
-## What is mTLS?
-
-Mutual authentication refers to two-way authentication, where the client and
-the server can both authenticate themselves to the other party.
-
-With mutual TLS authentication, the client and server both present TLS
-certificates to the other party (and can prove their identity using their
-private key) during the TLS handshake. They can verify the other's
-certificate using their trusted CAs.
-
-## mTLS with Kong
-
-Kong 1.3 and above support mutual TLS authentication between Kong and the
-upstream service.
-
-Let's take a look at how one can configure it.
-
-## Configure Kong to verify upstream server certificate
-
-Kong, by default, does not verify the certificate presented by the upstream
-service.
-
-To enforce certificate verification, you need to configure the following
-environment variables on Kong's container in your deployment:
-
-```
-KONG_NGINX_PROXY_PROXY_SSL_VERIFY="on"
-KONG_NGINX_PROXY_PROXY_SSL_VERIFY_DEPTH="3"
-KONG_NGINX_PROXY_PROXY_SSL_TRUSTED_CERTIFICATE="/path/to/ca_certs.pem"
-```
-
-These basically translate to
-[NGINX directives](https://nginx.org/en/docs/http/ngx_http_proxy_module.html)
-to configure NGINX to verify certificates.
-
-Please make sure that the trusted certificates are correctly
-mounted into Kong's container and that the path to the certificate is correctly
-reflected in the above environment variable.
-
-## Configure Kong to present its certificate to the upstream server
-
-In the above section, we achieved one side of mutual authentication,
-where Kong has been configured to verify the identity of the upstream server.
-
-In this section, we will configure Kong to present its identity to the
-upstream server.
-
-To configure this, you have two options, depending on your use-case.
-If you would like Kong to present its client certificate to each and every -service that it talks to, you can configure the client certificate -at the global level using Nginx directives. -If you would like to configure a different certificate for -each service that Kong talks to or want to configure Kong to present a -client certificate only to a subset of all services that it is configured to -communicate with, then you can configure that using an annotation on -the Kubernetes Service resource. - -### Global Nginx directive - -You need to configure two Nginx directives for this purpose: -- [`proxy_ssl_certificate`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate) -- [`proxy_ssl_certificate_key`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_key) - -You can mount the certificate and key pair using secrets into the Kong pod -and then set the following two environment variables to set the above two -directives: - -``` -KONG_NGINX_PROXY_PROXY_SSL_CERTIFICATE="/path/to/client_cert.pem" -KONG_NGINX_PROXY_PROXY_SSL_CERTIFICATE_KEY="/path/to/key.pem" -``` - -Once configured, Kong will present its client certificate to every upstream -server that it talks to. - -### Per service annotation - -To configure a different client certificate for each service or only for a -subset of services, you can do so using the -[`konghq.com/client-cert`](/kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcom/client-cert) -annotation. - -To use the annotation, you first need to create a TLS secret with the -client certificate and key in Kubernetes. -The secret should be created in the same namespace as your Kubernetes -Service to which Kong should authenticate itself. - -Once the secret is in place, add the follow annotation on the service: - -``` -konghq.com/client-cert: -``` - -Kong will then use the TLS key-pair to authenticate itself against that service. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-consumer-credential-resource.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-consumer-credential-resource.md deleted file mode 100644 index 8c1acc30e7f8..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-consumer-credential-resource.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Provisioning Consumers and Credentials ---- - -This guide walks through how to use the KongConsumer custom -resource and use Secret resources to associate credentials with those -consumers. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
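If you have not set `PROXY_IP` yet, one common way to populate it is shown below. This is a sketch that assumes a `LoadBalancer`-type `kong-proxy` Service in the `kong` namespace, as created by the reference manifests; on providers that return a DNS name instead of an IP, use `.hostname` in the jsonpath.

```bash
# Grab the external address of the kong-proxy Service and export it for the rest of the guide
export PROXY_IP=$(kubectl get service kong-proxy -n kong \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo $PROXY_IP
```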
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Add authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. - -Let's add a KongPlugin resource to protect the API: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: httpbin-auth -plugin: key-auth -" | kubectl apply -f - -kongplugin.configuration.konghq.com/httpbin-auth created -``` - -Now, associate this plugin with the previous Ingress rule we created -using the `konghq.com/plugins` annotation: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - konghq.com/plugins: httpbin-auth - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Any request matching the proxying rules defined in the `demo` ingress will -now require a valid API key: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 19:30:33 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -Server: kong/1.2.1 - -{"message":"No API key found in request"} -``` - -As you can see above, Kong returns back a `401 Unauthorized` because -we didn't provide an API key. - -## Provision a Consumer - -Let's create a KongConsumer resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, let's provision an API-key associated with -this consumer so that we can pass the authentication imposed by Kong: - -Next, we will create a [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) -resource with an API-key inside it: - -```bash -$ kubectl create secret generic harry-apikey \ - --from-literal=kongCredType=key-auth \ - --from-literal=key=my-sooper-secret-key -secret/harry-apikey created -``` - -The type of credential is specified via `kongCredType`. -You can create the Secret using any other method as well. - -Since we are using the Secret resource, -Kubernetes will encrypt and store this API-key for us. 
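If you would like to double-check what was stored before wiring it up (an optional step), you can decode the key from the Secret:

```bash
# Prints the plaintext API key that Kong will expect in the `apikey` header
kubectl get secret harry-apikey -o jsonpath='{.data.key}' | base64 --decode
```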
- -Next, we will associate this API-key with the consumer we created previously. - -Please note that we are not re-creating the KongConsumer resource but -only updating it to add the `credentials` array: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry -credentials: -- harry-apikey" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry configured -``` - -## Use the credential - -Now, use the credential to pass authentication: - -```bash -$ curl -i -H 'apikey: my-sooper-secret-key' $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:34:44 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -In this guide, we learned how to leverage an authentication plugin in Kong -and provision credentials. This enables you to offload authentication into -your Ingress layer and keeps the application logic simple. - -All other authentication plugins bundled with Kong work in this -way and can be used to quickly add an authentication layer on top of -your microservices. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-external-service.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-external-service.md deleted file mode 100644 index 3e47325ff76c..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-external-service.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Expose an external application ---- - -This example shows how we can expose a service located outside the Kubernetes cluster using an Ingress. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Create a Kubernetes service - -First we need to create a Kubernetes Service [type=ExternalName][0] using the hostname of the application we want to expose. 
- -```bash -echo " -kind: Service -apiVersion: v1 -metadata: - name: proxy-to-httpbin -spec: - ports: - - protocol: TCP - port: 80 - type: ExternalName - externalName: httpbin.org -" | kubectl create -f - -``` - -## Create an Ingress to expose the service at the path `/foo` - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: proxy-from-k8s-to-httpbin - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: proxy-to-httpbin - servicePort: 80 -' | kubectl create -f - -``` - -## Test the service - -```bash -$ curl -i $PROXY_IP/foo -``` - -[0]: https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-ingress-with-grpc.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-ingress-with-grpc.md deleted file mode 100644 index 64995357181b..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-ingress-with-grpc.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Using Ingress with gRPC ---- - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Pre-requisite - -To make `gRPC` requests, you need a client which can invoke gRPC requests. -In this guide, we use -[`grpcurl`](https://github.com/fullstorydev/grpcurl#installation). -Please ensure that you have that installed in on your local system. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -#### Running GRPC - -1. Add a grpc deployment and service - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/sample-apps/grpc.yaml -service/grpcbin created -deployment.apps/grpcbin created -``` -1. Create a demo grpc ingress rule: - -```bash -$ echo "apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: / - backend: - serviceName: grpcbin - servicePort: 9001" | kubectl apply -f - -ingress.extensions/demo created -``` -1. Next, we need to update the Ingress rule to specify gRPC as the protocol. -By default, all routes are assumed to be either HTTP or HTTPS. This annotation -informs Kong that this route is a gRPC(s) route and not a plain HTTP route: - -``` -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"grpc,grpcs"}}}' -``` - -1. Next, we also update the upstream protocol to be `grpcs`. -Similar to routes, Kong assumes that services are HTTP-based by default. 
-With this annotation, we configure Kong to use gRPCs protocol when it -talks to the upstream service: - -``` -$ kubectl patch svc grpcbin -p '{"metadata":{"annotations":{"konghq.com/protocol":"grpcs"}}}' -``` - -1. You should be able to run a request over `gRPC`: - -``` -$ grpcurl -v -d '{"greeting": "Kong Hello world!"}' -insecure $PROXY_IP:443 hello.HelloService.SayHello -``` diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-kong-with-knative.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-kong-with-knative.md deleted file mode 100644 index 7fe55abf04fc..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-kong-with-knative.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: Using Kong with Knative ---- - -The {{site.kic_product_name}} supports managing ingress traffic for -serverless workloads managed via Knative. - -In this guide, we will learn how to use Kong with Knative services and -configure plugins for Knative services. - - -## Pre-requisite - -This guide will be easier to follow if you have access to a Kubernetes -cluster that is running in the cloud rather than Minikube or any other -local environment. The guide requires access to DNS and a public IP -address or DNS name will certainly keep things simpler and easy for you. - -## Install Knative - -If you don't have knative installed, you need to install Knative: - -``` -kubectl apply --filename https://github.com/knative/serving/releases/download/v0.13.0/serving-crds.yaml -kubectl apply --filename https://github.com/knative/serving/releases/download/v0.13.0/serving-core.yaml -``` - -This will install the resources that are required to run Knative. - -## Install Kong - -Next, install the {{site.kic_product_name}}: - -``` -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -``` - -You can choose to install a different flavor, like using a database, -or using an Enterprise installation instead of Open-Source. You can also -use Helm installation method if that works for you. - -Once Kong is installed, -you should note down the IP address or public CNAME of -`kong-proxy` service. - -In the current case case, - -```shell -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.248.154 35.247.39.83 80:30345/TCP,443:31872/TCP 53m -``` - -Take a note of the above IP address "`35.247.39.83`". This will be different -for your installation. - -## Configure Knative to use Kong for Ingress - -### Ingress class - -Next, we will configure Knative to use `kong` as the Ingress class: - -``` -$ kubectl patch configmap/config-network \ - --namespace knative-serving \ - --type merge \ - --patch '{"data":{"ingress.class":"kong"}}' -``` - -## Setup Knative domain - -As the final step, we need to configure Knative's base domain at which -our services will be accessible. - -We override the default ConfigMap with the DNS name of `${KONG_IP}.xip.io`. -This will be different for you: - -``` -$ echo ' -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-domain - namespace: knative-serving - labels: - serving.knative.dev/release: v0.13.0 -data: - 35.247.39.83.xip.io: "" -' | kubectl apply -f - -configmap/config-domain configured -``` - -Once this is done, the setup is complete and we can move onto using Knative -and Kong. 
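Before moving on, you can optionally confirm that both settings took effect. This quick sanity check is not part of the original steps:

```bash
# Should print "kong"
kubectl get configmap config-network -n knative-serving \
  -o jsonpath='{.data.ingress\.class}'

# Should show your <EXTERNAL-IP>.xip.io key under the data section
kubectl get configmap config-domain -n knative-serving -o yaml
```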
-
-## Test connectivity to Kong
-
-Send a request to the above domain that we have configured:
-
-```bash
-curl -i http://35.247.39.83.xip.io/
-HTTP/1.1 404 Not Found
-Date: Wed, 11 Mar 2020 00:18:49 GMT
-Content-Type: application/json; charset=utf-8
-Connection: keep-alive
-Content-Length: 48
-X-Kong-Response-Latency: 1
-Server: kong/1.4.3
-
-{"message":"no Route matched with those values"}
-```
-
-The 404 response is expected since we have not configured any services
-in Knative yet.
-
-## Install a Knative Service
-
-Let's install our first Knative service:
-
-```
-$ echo "
-apiVersion: serving.knative.dev/v1
-kind: Service
-metadata:
-  name: helloworld-go
-  namespace: default
-spec:
-  template:
-    spec:
-      containers:
-        - image: gcr.io/knative-samples/helloworld-go
-          env:
-            - name: TARGET
-              value: Go Sample v1
-" | kubectl apply -f -
-```
-
-It can take a couple of minutes for everything to get configured but
-eventually, you will see the URL of the Service.
-Let's make the call to the URL:
-
-```shell
-$ curl -v http://helloworld-go.default..xip.io
-HTTP/1.1 200 OK
-Content-Type: text/plain; charset=utf-8
-Content-Length: 20
-Connection: keep-alive
-Date: Tue, 10 Mar 2020 23:45:14 GMT
-X-Kong-Upstream-Latency: 2723
-X-Kong-Proxy-Latency: 0
-Via: kong/1.4.3
-
-Hello Go Sample v1!
-```
-
-The request is served by Knative and from the response HTTP headers,
-we can tell that the request was proxied by Kong.
-
-The first request will also take longer to complete as Knative will spin
-up a new Pod to service the request.
-We can see how Kong observed this latency and recorded it in the
-`X-Kong-Upstream-Latency` header.
-If you perform subsequent requests,
-they should complete much faster.
-
-## Plugins for Knative services
-
-Let's now execute a plugin for our new Knative service.
-
-First, let's create a KongPlugin resource:
-
-```shell
-$ echo '
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: add-response-header
-config:
-  add:
-    headers:
-    - "demo: injected-by-kong"
-plugin: response-transformer
-' | kubectl apply -f -
-kongplugin.configuration.konghq.com/add-response-header created
-```
-
-Next, we will update the Knative service created before and add an
-annotation to the template:
-
-```shell
-$ echo "
-apiVersion: serving.knative.dev/v1
-kind: Service
-metadata:
-  name: helloworld-go
-  namespace: default
-spec:
-  template:
-    metadata:
-      annotations:
-        konghq.com/plugins: add-response-header
-    spec:
-      containers:
-        - image: gcr.io/knative-samples/helloworld-go
-          env:
-            - name: TARGET
-              value: Go Sample v1
-" | kubectl apply -f -
-service.serving.knative.dev/helloworld-go configured
-```
-
-Please note that the annotation `konghq.com/plugins` is
-not added to the Service definition
-itself but to the `spec.template.metadata.annotations`.
-
-Let's make the request again:
-
-```shell
-$ curl -i http://helloworld-go.default.35.247.39.83.xip.io/
-HTTP/1.1 200 OK
-Content-Type: text/plain; charset=utf-8
-Content-Length: 20
-Connection: keep-alive
-Date: Wed, 11 Mar 2020 00:35:07 GMT
-demo: injected-by-kong
-X-Kong-Upstream-Latency: 2455
-X-Kong-Proxy-Latency: 1
-Via: kong/1.4.3
-
-Hello Go Sample v1!
-```
-
-As we can see, the response has the `demo` header injected.
-
-This guide demonstrates the power of using Kong and Knative together.
-Check out other plugins and try them out with multiple Knative services.
-The possibilities are endless!
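As one more illustration (a sketch that is not part of the original walkthrough; the plugin name is illustrative), you could rate-limit the same Knative service by creating a second `KongPlugin` and listing both plugins, comma-separated, in the same annotation:

```bash
$ echo '
apiVersion: configuration.konghq.com/v1
kind: KongPlugin
metadata:
  name: knative-rate-limit
config:
  minute: 5
  policy: local
plugin: rate-limiting
' | kubectl apply -f -
```

Then set `konghq.com/plugins: add-response-header,knative-rate-limit` in `spec.template.metadata.annotations` of the `helloworld-go` Service, and the `X-RateLimit-*` headers should appear alongside the `demo` header in the responses.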
diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-kongclusterplugin-resource.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-kongclusterplugin-resource.md deleted file mode 100644 index 141f2043f806..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-kongclusterplugin-resource.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -title: Using KongClusterPlugin resource ---- - -In this guide, we will learn how to use KongClusterPlugin resource to configure -plugins in Kong. -The guide will cover configuring a plugin for services across different -namespaces. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Installing sample services - -We will start by installing two services, -an echo service and an httpbin service in their corresponding namespaces. - -```bash -$ kubectl create namespace httpbin -namespace/httpbin created -$ kubectl apply -n httpbin -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -```bash -$ kubectl create namespace echo -namespace/echo created -$ kubectl apply -n echo -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress rules - -Let's expose these services outside the Kubernetes cluster -by defining Ingress rules. 
- -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: httpbin-app - namespace: httpbin - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created - -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: echo-app - namespace: echo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test these endpoints: - -```bash -# access httpbin service -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:38:00 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -# access echo service -$ curl -i $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 -<-- clipped -- > -``` - -## Create KongClusterPlugin resource - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: add-response-header - annotations: - kubernetes.io/ingress.class: kong -config: - add: - headers: - - "demo: injected-by-kong" -plugin: response-transformer -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/add-response-header created -``` - -Note how the resource is created at cluster-level and not in any specific -namespace: - -```bash -$ kubectl get kongclusterplugins -NAME PLUGIN-TYPE AGE -add-response-header response-transformer 4s -``` - -If you send requests to `PROXY_IP` now, you will see that the header is not -injected in the responses. The reason being that we have created a -resource but we have not told Kong when to execute the plugin. - -## Configuring plugins on Ingress resources - -We will associate the `KongClusterPlugin` resource with the two Ingress resources -that we previously created: - -```bash -$ kubectl patch ingress -n httpbin httpbin-app -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/httpbin-app patched - -$ kubectl patch ingress -n echo echo-app -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/echo-app patched -``` - -Here, we are asking the {{site.kic_product_name}} to execute the response-transformer -plugin whenever a request matching any of the above two Ingress rules is -processed. 
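You can confirm that the annotation landed on each Ingress with an optional check:

```bash
# Both commands should print: add-response-header
kubectl get ingress -n httpbin httpbin-app -o jsonpath='{.metadata.annotations.konghq\.com/plugins}'
kubectl get ingress -n echo echo-app -o jsonpath='{.metadata.annotations.konghq\.com/plugins}'
```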
- -Let's test it out: - -```bash -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:54:31 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:54:39 GMT -Server: echoserver -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As can be seen in the output, the `demo` header is injected by Kong when -the request matches the Ingress rules defined in our two Ingress rules. - -## Updating plugin configuration - -Now, let's update the plugin configuration to change the header value from -`injected-by-kong` to `injected-by-kong-for-kubernetes`: - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: add-response-header - annotations: - kubernetes.io/ingress.class: kong -config: - add: - headers: - - "demo: injected-by-kong-for-kubernetes" -plugin: response-transformer -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/add-response-header configured -``` - -If you repeat the requests from the last step, you will see Kong -now responds with updated header value. - -This guides demonstrates how plugin configuration can be shared across -services running in different namespaces. -This can prove to be useful if the persona controlling the plugin -configuration is different from service owners that are responsible for the -Service and Ingress resources in Kubernetes. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-kongingress-resource.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-kongingress-resource.md deleted file mode 100644 index de12faa0ea8f..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-kongingress-resource.md +++ /dev/null @@ -1,251 +0,0 @@ ---- -title: Using KongIngress resource ---- - -In this guide, we will learn how to use KongIngress resource to control -proxy behavior. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Install a dummy service - -We will start by installing the echo service. - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress - -Let's expose the echo service outside the Kubernetes cluster -by defining an Ingress. 
- -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test: - -```bash -$ curl -i $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/foo - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/foo -``` - -## Use KongIngress with Ingress resource - -By default, Kong will proxy the entire path to the service. -This can be seen in the real path value in the above response. - -We can configure Kong to strip out the part of the path defined in the -Ingress rule and to only respond to GET requests for this particular rule. - -To modify these behaviours, let's first create a KongIngress resource -defining the new behaviour: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: sample-customization -route: - methods: - - GET - strip_path: true" | kubectl apply -f - -kongingress.configuration.konghq.com/test created -``` - -Now, let's associate this KongIngress resource with our Ingress resource -using the `konghq.com/override` annotation. - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/override":"sample-customization"}}}' -ingress.extensions/demo patched -``` - -Now, Kong will proxy only GET requests on `/foo` path and -strip away `/foo`: - -```bash -$ curl -s $PROXY_IP/foo -X POST -{"message":"no Route matched with those values"} - - -$ curl -s $PROXY_IP/foo/baz - - -Hostname: echo-d778ffcd8-vrrtw - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-vrrtw - pod namespace: default - pod IP: 10.60.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/baz - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/baz -``` - -As you can see, the real path value is `/baz`. - -## Use KongIngress with Service resource - -KongIngress can be used to change load-balancing, health-checking and other -proxy behaviours in Kong. - -Next, we are going to tweak two settings: - -- Configure Kong to hash the requests based on IP address of the client. -- Configure Kong to proxy all the request on `/foo` to `/bar`. - -Let's create a KongIngress resource with these settings: - -```bash -$ echo 'apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-customization -upstream: - hash_on: ip -proxy: - path: /bar/' | kubectl apply -f - -kongingress.configuration.konghq.com/demo-customization created -``` - -Now, let's associate this KongIngress resource to the echo service. 
- -```bash -$ kubectl patch service echo -p '{"metadata":{"annotations":{"konghq.com/override":"demo-customization"}}}' -service/echo patched -``` - -Let's test this now: - -```bash -$ curl $PROXY_IP/foo/baz -Hostname: echo-d778ffcd8-vrrtw - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-vrrtw - pod namespace: default - pod IP: 10.60.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/bar/baz - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/bar/baz - -<-- clipped --> -``` - -Real path received by the upstream service (echo) is now changed to `/bar/baz`. - -Also, now all the requests will be sent to the same upstream pod: - -```bash -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -``` - - -You can experiement with various load balancing and healthchecking settings -that KongIngress resource exposes to suit your specific use case. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-kongplugin-resource.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-kongplugin-resource.md deleted file mode 100644 index 02ce7664b7a4..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-kongplugin-resource.md +++ /dev/null @@ -1,469 +0,0 @@ ---- -title: Using KongPlugin resource ---- - -In this guide, we will learn how to use KongPlugin resource to configure -plugins in Kong to modify requests for a specific request path. -The guide will cover configuring a plugin for a specific service, a set of Ingress rules -and for a specific user of the API. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Installing sample services - -We will start by installing two services, -an echo service and an httpbin service. 
- -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress rules - -Let's expose these services outside the Kubernetes cluster -by defining Ingress rules. - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 - - path: /bar - backend: - serviceName: echo - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test these endpoints: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:38:00 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - - - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 -<-- clipped -- > -``` - -Let's add another Ingress resource which proxies requests to `/baz` to httpbin -service: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-2 - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /baz - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo-2 created -``` - -We will use this path later. - -## Configuring plugins on Ingress resource - -Next, we will configure two plugins on the Ingress resource. - -First, we will create a KongPlugin resource: - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: add-response-header -config: - add: - headers: - - "demo: injected-by-kong" -plugin: response-transformer -' | kubectl apply -f - -kongplugin.configuration.konghq.com/add-response-header created -``` - -Next, we will associate it with our Ingress rules: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/demo patched -``` - -Here, we are asking the {{site.kic_product_name}} to execute the response-transformer -plugin whenever a request matching the Ingress rule is processed. 
- -Let's test it out: - -```bash -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:54:31 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:54:39 GMT -Server: echoserver -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As can be seen in the output, the `demo` header is injected by Kong when -the request matches the Ingress rules defined in the `demo` Ingress resource. - -If we send a request to `/baz`, then we can see that the header is not injected -by Kong: - -```bash -$ curl -I $PROXY_IP/baz -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:56:20 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Here, we have successfully setup a plugin which is executed only when a -request matches a specific `Ingress` rule. - -## Configuring plugins on Service resource - -Next, we will see how we can configure Kong to execute plugins for requests -which are sent to a specific service. - -Let's add a `KongPlugin` resource for authentication on the httpbin service: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: httpbin-auth -plugin: key-auth -" | kubectl apply -f - - -kongplugin.configuration.konghq.com/httpbin-auth created -``` - -Next, we will associate this plugin to the httpbin service running in our -cluster: - -```bash -$ kubectl patch service httpbin -p '{"metadata":{"annotations":{"konghq.com/plugins":"httpbin-auth"}}}' -service/httpbin patched -``` - -Now, any request sent to the service will require authentication, -no matter which `Ingress` rule it matched: - -```bash -$ curl -I $PROXY_IP/baz -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 22:09:04 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -Server: kong/1.2.1 - -$ curl -I $PROXY_IP/foo -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 22:12:13 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -demo: injected-by-kong -Server: kong/1.2.1 -``` - -You can also see how the `demo` header was injected as the request also -matched one of the rules defined in the `demo` `Ingress` resource. - -## Configure consumer and credential - -Follow the [Using Consumers and Credentials](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource/) -guide to provision a user and an apikey. 
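If you skipped that guide, the short version is a key-auth Secret plus a KongConsumer that references it; the same names and key are reused here so that the requests below work:

```bash
# Create the key-auth credential as a Secret
$ kubectl create secret generic harry-apikey \
    --from-literal=kongCredType=key-auth \
    --from-literal=key=my-sooper-secret-key

# Create the consumer and attach the credential
$ echo "apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: harry
  annotations:
    kubernetes.io/ingress.class: kong
username: harry
credentials:
- harry-apikey" | kubectl apply -f -
```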
-Once you have it, please continue: - -Use the API key to pass authentication: - -```bash -$ curl -I $PROXY_IP/baz -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:16:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:15:34 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 -``` - -## Configure a global plugin - -Now, we will protect our Kubernetes cluster. -For this, we will be configuring a rate-limiting plugin, which -will throttle requests coming from the same client. - -This must be a cluster-level `KongClusterPlugin` resource, as `KongPlugin` -resources cannot be applied globally, to preserve Kubernetes RBAC guarantees -for cross-namespace isolation. - -Let's create the `KongClusterPlugin` resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - limit_by: consumer - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit created -``` - -With this plugin (please note the `global` label), every request through -the {{site.kic_product_name}} will be rate-limited: - -```bash -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:34:10 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 22:34:14 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Configure a plugin for a specific consumer - -Now, let's say we would like to give a specific consumer a higher rate-limit. - -For this, we can create a `KongPlugin` resource and then associate it with -a specific consumer. 
- -First, create the `KongPlugin` resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: harry-rate-limit -config: - minute: 10 - limit_by: consumer - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongplugin.configuration.konghq.com/harry-rate-limit created -``` - -Next, associate this with the consumer: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong - konghq.com/plugins: harry-rate-limit -username: harry -credentials: -- harry-apikey" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry configured -``` - -Note the annotation being added to the `KongConsumer` resource. - -Now, if the request is made as the `harry` consumer, the client -will be rate-limited differently: - -```bash -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:34:10 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-RateLimit-Limit-minute: 10 -X-RateLimit-Remaining-minute: 9 -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -# a regular unauthenticated request -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 22:34:14 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -This guide demonstrates how you can use the {{site.kic_product_name}} to -impose restrictions and transformations -on various levels using Kubernetes style APIs. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-mtls-auth-plugin.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-mtls-auth-plugin.md deleted file mode 100644 index cd54433a09a7..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-mtls-auth-plugin.md +++ /dev/null @@ -1,320 +0,0 @@ ---- -title: Using mtls-auth plugin ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -verify client certificates using CA certificates and -[mtls-auth](https://docs.konghq.com/hub/kong-inc/mtls-auth/) plugin -for HTTPS requests. - -> Note: You need an Enterprise license to use this feature. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -Kong for Kubernetes Enterprise on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise) to configure -this environment variable. - -If everything is set up correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
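Because `mtls-auth` is an Enterprise plugin, it can be worth confirming that the proxy container is running an Enterprise image before continuing. This is an optional check that assumes the `ingress-kong` Deployment and `proxy` container names used by the reference manifests:

```bash
# The printed image name should reference an Enterprise build of Kong
kubectl get deployment ingress-kong -n kong \
  -o jsonpath='{.spec.template.spec.containers[?(@.name=="proxy")].image}'
```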
- -## Provision a CA certificate in Kong - -CA certificates in Kong are provisioned by create a `Secret` resource in -Kubernetes. - -The secret resource must have a few properties: -- It must have the `konghq.com/ca-cert: "true"` label. -- It must have a `cert` data property which contains a valid CA certificate - in PEM format. -- It must have an `id` data property which contains a random UUID. -- It must have a `kubernetes.io/ingress.class` annotation whose value matches - the value of the controller's `--ingress-class` argument. By default, that - value is "kong". - -Note that a self-signed CA certificate is being used for the purpose of this -guide. You should use your own CA certificate that is backed by -your PKI infrastructure. - -**This example is used to show the YAML format of a `Secret` resource for the CA certificate. DO NOT directly use the certificate here. -You should use your own CA certificate, or generate a self-signed certificate for testing.** To generate self-signed CA certificates, follow these instructions: - -```bash -openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes\ - -subj "/C=US/ST=California/L=San Francisco/O=Kong/OU=Org/CN=www.example.com" -``` - -```bash -$ echo "apiVersion: v1 -kind: Secret -metadata: - name: my-ca-cert - annotations: - kubernetes.io/ingress.class: kong - labels: - konghq.com/ca-cert: 'true' -type: Opaque -stringData: - cert: | - -----BEGIN CERTIFICATE----- - MIICwTCCAamgAwIBAgIUHGUzUWvHJHrREvIZIcORiFUvze4wDQYJKoZIhvcNAQEL - BQAwEDEOMAwGA1UEAwwFSGVsbG8wHhcNMjAwNTA4MjExODA1WhcNMjAwNjA3MjEx - ODA1WjAQMQ4wDAYDVQQDDAVIZWxsbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC - AQoCggEBANCMMBngjuTvqts8ZXtZhqdr181QH/NmytW1KlyqZd6ppXUer+i0OWhP - 1nAyHsBPJljKAFLd8l1EioPFkN78/wJFDJrHOtfniIQPVLdS2cnNQ72dLyQH6smH - JQDV8ePBQ2GdRP6s61+Da8eoaW6nSLtmEUhxvyteboqwmi2CtUtAfuiU1m5sOdpS - z+L4D08CE+SFIT4MGD3gxNdg7lccWCHIfk54VRSdGDKEVwed8OQvxD0TdpHY+ym5 - nJ4JSkhiS9XIodnxR3AZ6rIPRqk+MQ4LGTjX2EbM0/Yg4qvnZ7m4fcpK2goDZIVL - EF8F+ka1RaAYWTsXI1BAkJbb3kdo/yUCAwEAAaMTMBEwDwYDVR0TBAgwBgEB/wIB - ADANBgkqhkiG9w0BAQsFAAOCAQEAVvB/PeVZpeQ7q2IQQQpADtTd8+22Ma3jNZQD - EkWGZEQLkRws4EJNCCIvkApzpx1GqRcLLL9lbV+iCSiIdlR5W9HtK07VZ318gpsG - aTMNrP9/2XWTBzdHWaeZKmRKB04H4z7V2Dl58D+wxjdqNWsMIHeqqPNKGamk/q8k - YFNqNwisRxMhU6qPOpOj5Swl2jLTuVMAeGWBWmPGU2MUoaJb8sc2Vix9KXcyDZIr - eidkzkqSrjNzI0yJ2gdCDRS4/Rw9iV3B3SRMs0mJMLBDrsowhNfLAd8I3NHzLwps - dZFcvZcT/p717K3hlFVdjGnKIgKcG7aYji/XRR87HKnc+cJMCw== - -----END CERTIFICATE----- - id: cce8c384-721f-4f58-85dd-50834e3e733a" | kubectl create -f - -secret/my-ca-cert created -``` - -Please note the ID, you can use this ID one or use a different one but -the ID is important in the next step when we create the plugin. -Each CA certificate that you create needs a unique ID. -Any random UUID will suffice here and it doesn't have an security -implication. - -You can use [uuidgen](https://linux.die.net/man/1/uuidgen) (Linux, OS X) or -[New-Guid](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/new-guid) -(Windows) to generate an ID. 
- -For example: -```bash -$ uuidgen -907821fc-cd09-4186-afb5-0b06530f2524 -``` - -## Configure mtls-auth plugin - -Next, we are going to create an `mtls-auth` KongPlugin resource which references -CA certificate provisioned in the last step: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: mtls-auth -config: - ca_certificates: - - cce8c384-721f-4f58-85dd-50834e3e733a - skip_consumer_lookup: true - revocation_check_mode: SKIP -plugin: mtls-auth -" | kubectl apply -f - -kongplugin.configuration.konghq.com/mtls-auth created -``` - -## Install a dummy service - -Let's deploy an echo service which we wish to protect -using TLS client certificate authentication. - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -You can deploy a different service or skip this step if you already -have a service deployed in Kubernetes. - -## Set up Ingress - -Let's expose the echo service outside the Kubernetes cluster -by defining an Ingress. - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/plugins: mtls-auth - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -## Test the endpoint - -Now, let's test to see if Kong is asking for client certificate -or not when we make the request: - -``` -$ curl -k https://$PROXY_IP -HTTP/2 401 -date: Mon, 11 May 2020 18:15:05 GMT -content-type: application/json; charset=utf-8 -content-length: 50 -x-kong-response-latency: 0 -server: kong/2.0.4.0-enterprise-k8s - -{"message":"No required TLS certificate was sent"} -``` - -As we can see, Kong is restricting the request because it doesn't -have the necessary authentication information. - -Two things to note here: -- `-k` is used because Kong is set up to serve a self-signed certificate - by default. For full mutual authentication in production use cases, - you must configure Kong to serve a certificate that is signed by a trusted CA. -- For some deployments `$PROXY_IP` might contain a port that points to - `http` port of Kong. In others, it might happen that it contains a DNS name - instead of an IP address. If needed, please update the - command to send an `https` request to the `https` port of Kong or - the load balancer in front of it. - - -## Provisioning credential - -Next, in order to authenticate against Kong, create the client -certificate and private key with the following content: - -{:.important} ->This example is only used to show the format of client certificate and private key. **DO NOT** use the certificate and private key directly. -You should use a certificate and private key signed by your own CA. 
- -```bash -$ cat client.crt ------BEGIN CERTIFICATE----- -MIIEFTCCAv0CAWUwDQYJKoZIhvcNAQELBQAwEDEOMAwGA1UEAwwFSGVsbG8wHhcN -MjAwNTA4MjE0OTE1WhcNMjEwNTA4MjE0OTE1WjCBkDELMAkGA1UEBhMCQVUxEzAR -BgNVBAgMClNvbWUtU3RhdGUxDTALBgNVBAcMBHNvbWUxETAPBgNVBAoMCHNvbWUg -b3JnMRAwDgYDVQQLDAdvcmd1bml0MRswGQYDVQQDDBJleGFtcGxlLmtvbmdocS5j -b20xGzAZBgkqhkiG9w0BCQEWDGZvb0Bzb21lLmNvbTCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBAM/y80ppzwGYS7zl+A6fx4Xkjwja+ZUK/AoBDazS3TkR -W1tDFZ71koLd60qK2W1d9Wh0/F3iNTcobVefr02mEcLtl+d4zUug+W7RsK/8JSCM -MIDVDYzlTWdd7RJzV1c/0NFZyTRkEVSjGn6eQoC/1aviftiNyfqWtuIDQ5ctSBt8 -2fyvDwu/tBR5VyKu7CLnjZ/ffjNT8WDfbO704XeBBId0+L8i8J7ddYlRhZufdjEw -hKx2Su8PZ9RnJYShTBOpD0xdveh16eb7dpCZiPnp1/MOCyIyo1Iwu570VoMde9SW -sPFLdUMiCXw+A4Gp/e9Am+D/98PiL4JChKsiowbzpDfMrVQH4Sblpcgn/Pp+u1be -2Kl/7wqr3TA+w/unLnBnB859v3wDhSW4hhKASoFwyX3VfJ43AkmWFUBX/bpDvHto -rFw+MvbSLsS3QD5KlZmega1pNZtin5KV8H/oJI/CjEc9HHwd27alW9VkUu0WrH0j -c98wLHB/9xXLjunabxSmd+wv25SgYNqpsRNOLgcJraJbaRh4XkbDyuvjF2bRJVP4 -pIjntxQHS/oDFFFK3wc7fp/rTAl0PJ7tytYj4urg45N3ts7unwnB8WmKzD9Avcwe -8Kst12cEibS8X2sg8wOqgB0yarC17mBEqONK7Fw4VH+VzZYw0KGF5DWjeSXj/XsD -AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEvTMHe27npmyJUBxQeHcNFniMJUWZf0 -i9EGd+XlF+m/l3rh1/mCecV7s32QTZEiFHv4UJPYASbgtx7+mEZuq7dVsxIUICWs -gyRkwvKjMqK2tR5IRkquhK5PuDS0QC3M/ZsDwnTgaezFrplFYf80z1kAAkm/c7eh -ZEjI6+1vuaS+HX1w2unk42PiAEB6oKFi3b8xl4TC6acYfMYiC3cOa/d3ZKHhqXhT -wM0VtDe0Qn1kExe+19XJG5cROelxmMXBm1+/c2KUw1yK8up6kJlEsmd8JLw/wMUp -xcJUKIH1qGBlRlFTYbVell+dB7IkHhadrnw27Z47uHobB/lzN69r63c= ------END CERTIFICATE----- -``` - -```bash -$ cat client.pem ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAz/LzSmnPAZhLvOX4Dp/HheSPCNr5lQr8CgENrNLdORFbW0MV -nvWSgt3rSorZbV31aHT8XeI1NyhtV5+vTaYRwu2X53jNS6D5btGwr/wlIIwwgNUN -jOVNZ13tEnNXVz/Q0VnJNGQRVKMafp5CgL/Vq+J+2I3J+pa24gNDly1IG3zZ/K8P -C7+0FHlXIq7sIueNn99+M1PxYN9s7vThd4EEh3T4vyLwnt11iVGFm592MTCErHZK -7w9n1GclhKFME6kPTF296HXp5vt2kJmI+enX8w4LIjKjUjC7nvRWgx171Jaw8Ut1 -QyIJfD4Dgan970Cb4P/3w+IvgkKEqyKjBvOkN8ytVAfhJuWlyCf8+n67Vt7YqX/v -CqvdMD7D+6cucGcHzn2/fAOFJbiGEoBKgXDJfdV8njcCSZYVQFf9ukO8e2isXD4y -9tIuxLdAPkqVmZ6BrWk1m2KfkpXwf+gkj8KMRz0cfB3btqVb1WRS7RasfSNz3zAs -cH/3FcuO6dpvFKZ37C/blKBg2qmxE04uBwmtoltpGHheRsPK6+MXZtElU/ikiOe3 -FAdL+gMUUUrfBzt+n+tMCXQ8nu3K1iPi6uDjk3e2zu6fCcHxaYrMP0C9zB7wqy3X -ZwSJtLxfayDzA6qAHTJqsLXuYESo40rsXDhUf5XNljDQoYXkNaN5JeP9ewMCAwEA -AQKCAgAt5cC/HuV6w6OL2PJMQAXozo6ndLV7qQYCK0Nabtw3GVahqQffovIoglTJ -iiX9Vqyw1olRK3l1aC3iFjO6Hrpy3MAXbflaBPio9g1aenuzWF3oQZ4RCBdyhi+q -T9zqPAKaAog/UQrmNG3HnqStCCpgGsjGgV0gOx24euHzPyJYNtFiTT0z6acUkcei -txsVhSgkLk8Lgy6WpBnGEDSnjMl0IGQ6w7p6RgUIPv8PXz3WE5BlBGL7qtnO7slA -Id3JxRnEUDh3U3dB7SS5O7oY9v0b/3CDpsuXe3wd1G432E0Zmf0J9Q63t46CZqmd -d+i9YvRE0BpemNDFnmrr3uQ+x43qARtonEELirY99aW0hUUfD7PieLNnZP7tueVB -J80GUU5ckJhn9u6SlKZtvBU2mAWvaKZEv1+9vDh4Le8fNtubpC5YtSKztc66csL6 -DLtyi81iftpF2YtDVKK8UB35KyG/0IWkXyfquOkYuL8RwrJR9tNL1+Zs4GqgC5sH -fVIwR6/+w/kpeH9nP8/0VaXRoqCjKQpVjFg9f645rZQ/OzcnQNv6k8Sr+4zHaHog -uFwOo7p4QfIPIBfU8+8RD36C5U/p5PiouR8sN+rfDCu0N07XKmHAphlqvjTR+OG/ -J5o3jGgAerMZn3gmiGUS+IdmrPw7we8gc8j8E8C6TjvlALQNOQKCAQEA6ySvPyMw -hiqfa9TeYce1gI2HtRyiCM1r7otFmTqS/I53he7b9LAZ5+gqXxMS/PW9QTvKET2q -vRU+xZYD4h/i9R+qZT3s7EsNBXBQHkvh0m0qNRtrsSgAYCWLsI/0nUOKIz6obHu5 -5SxS8y3y1t9SoXvWpzTpAnuk91BVMtSephf/4/hXlH2d1WnOC0SqS979aRrm8NE/ -rdT5qchhySyfZkYbADxy5AHHqoFTtkxGnLvcbY0X/oJI3zNYCFKTFNmb6/61cxuB -REjwREUFOhneXYb9mBG4bxuteCz65MyshiN1EAsVMnI6aEuHR6EAvt1Jslv7Qi1a -2UKM61XcL8m/lQKCAQEA4mTGaoZJ1yz+TCKMuae33Y9assXOYAQpdb3MY2UTgzQg -JAZYmwaAsBaC1e49G0eYVAP+eDI4u0OR0f0CW9Pf+OdGRWuZfVum0d+PmcIhJfgM 
-jXsR4CJpPcX7VZLHMZ77QFDh/xBHNXR8F1latPXFYR3ytcXxl4HEtodDpS84AgiO -57yPitY78MS16l3GJGWlgDdRP/LvVixugH2steHCtk8l932/qayUeezbYSEhyQ6L -13f0qRaBhvRsoULj3HvQWNPxmGYK3p+N+zXc1CErF6x8sDq4jeXyNg+26gZknea8 -3SEKKx+Wf4vT3rlUEeYy0uFubG06qYCdtj2ZuSOKNwKCAQEAgJpQqkRRrp8zD6Od -eHbkIonFVd1yFURcKlvLVdF+QFiesAaCD+IcWQRV4Cerc+PmfP35NtK2RbGP4jp4 -pzxvQUbvR23F3Tnyxi2188vmltKTifYUQRCym+MM8iTZUQV2UG5daO+GLPu/5jYU -IUaEh8MWE97RLUV4ZLZv0lwM5KQtlH3nUFQfdW/ne6wzQ0mS6OAIvF6E6EqZvSzV -plQcXbAr5kYpQ+BhVjRjF0nCOMhZ9yR6ofyZZFFNbUfUH0wghcKJdInveew2U/A3 -up4ZSqegnIHckA/gIODg2y/Bj59mz75v+mYU4aOlOhypLroSK1U5JultTXOjZLZR -tWUuvQKCAQAVcti9hOWABlo9WlSczkAENK2yHD12KU7IQegYTp4vowOchiFk5pPQ -mwFgRUbyy7Cp3QsB1jg7vaYWD/NmQceJbFfjAdOz5bgDUDvppFPBpiOCT/OcmYYA -/T3XmKVYlShWqpMOuDsW3GdZSvTmChbeIZk6EXvXD8tUQ7Jr9vJGdwsa92leDPf2 -0pwtjR7Vme+5GwSOm3SDZIg/kiiHvtDUtuDw9q/u4lPazU7nf90UkFU9X7cFQgWZ -hJS6Hn06CVzu3X2ZI6nJ97Ha5/p4+n97qbLSe226u9tbtddtipeDwjWIebXd6gs3 -IEc9Za+KVpXgFs2AZkTVhELs3h8vRCe3AoIBAQDRr0k5OePCsDbs6RadGI9Ta+pf -I30u8imKw8Rih++127UPjpc8OCzaQNvWnpdAoJTgo12fQJqGigRUfJMFFQn7u3jz -ggAq9WLRsXRZpEXk8NXDr/WhksOoWmkxLf4uNO7l2AytIFqZbb1pmTd0g+np2yBE -8VgDR45IxbGPQLsTzKXeXJuXOi7ut2ehJ+VgsS84BsRTeO4v+Y2qpGcyw6fXtU3E -NDrWe/C5QceILtDcd+JiXUgKrHRK+qrfawoxPBDVhYJ+N/Y7SqvZ2GvxibnRs8YA -cbhEebkfUHRQSEqkPr+ndRHInwWTMAWF4IhSuQOpTvT7PY7UNet2io8W8Py6 ------END RSA PRIVATE KEY----- -``` - -Now, use the key and certificate to authenticate against Kong and use the -service: - -```bash -$ curl --key client.key --cert client.crt https://$PROXY_IP/foo -k -I -HTTP/2 200 -content-type: text/plain; charset=UTF-8 -date: Mon, 11 May 2020 18:27:22 GMT -server: echoserver -x-kong-upstream-latency: 1 -x-kong-proxy-latency: 1 -via: kong/2.0.4.0-enterprise-k8s -``` - -## Conclusion - -This guide demonstrates how to implement client TLS authentication -using Kong. -You are free to use other features that mtls-auth plugin in Kong to -achieve more complicated use-cases. diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-oidc-plugin.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-oidc-plugin.md deleted file mode 100644 index 701241331423..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-oidc-plugin.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Using OIDC plugin ---- - -{{site.ee_product_name}}'s OIDC plugin can authenticate requests using OpenID Connect protocol. -This guide shows a basic example of how to setup the OIDC plugin using -the Ingress Controller. - -> Note: This works only with Enterprise version of Kong. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) documentation -to install enterprise version of the {{site.kic_product_name}}. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: 192.0.2.8.xip.io - http: - paths: - - path: / - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -We are using `192.0.2.8.xip.io` as our host, you can use any domain name -of your choice. A domain name is a prerequisite for this guide. -For demo purpose, we are using [xip.io](http://xip.io) -service to avoid setting up a DNS record. - -Test the Ingress rule: - -```bash -$ curl -i 192.0.2.8.xip.io/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Next, open a browser and browse to `http://192.0.2.8.xip.io`. -You should see landing page same as httpbin.org. - -## Setup OIDC plugin - -Now we are going to protect our dummy service with OpenID Connect -protocol using Google as our identity provider. - -First, set up an OAuth 2.0 application in -[Google](https://developers.google.com/identity/protocols/oauth2/openid-connect). - -Once you have setup your application in Google, use the client ID and client -secret and create a KongPlugin resource in Kubernetes: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: oidc-auth -config: - issuer: https://accounts.google.com/.well-known/openid-configuration - client_id: - - - client_secret: - - - redirect_uri: - - http://192.0.2.8.xip.io -plugin: openid-connect -" | kubectl apply -f - -kongplugin.configuration.konghq.com/global-rate-limit created -``` - -The `redirect_uri` parameter must be a URI that matches the Ingress rule we -created earlier. You must also [add it to your Google OIDC -configuration](https://developers.google.com/identity/protocols/oauth2/openid-connect#setredirecturi) - -Next, enable the plugin on our Ingress: - -```bash -$ kubectl patch ing demo -p '{"metadata":{"annotations":{"konghq.com/plugins":"oidc-auth"}}}' -ingress.extensions/demo patched -``` -## Test - -Now, if you visit the host you have set up in your Ingress resource, -Kong should redirect you to Google to verify your identity. -Once you identify yourself, you should be able to browse our dummy service -once again. - -This basic configuration permits any user with a valid Google account to access -the dummy service. -For setting up more complicated authentication and authorization flows, -please read -[plugin docs](/gateway/latest/configure/auth/oidc-google/). 
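-
-Because the client secret is sensitive, you may prefer not to keep it inline in the
-KongPlugin. As a sketch (the Secret name and key below are placeholders), the whole
-plugin configuration can instead be stored in a Secret and referenced through
-`configFrom`, which this controller version supports on KongPlugin resources:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: oidc-auth-conf
-stringData:
-  oidc-auth: |
-    issuer: https://accounts.google.com/.well-known/openid-configuration
-    client_id:
-    - <your-client-id>
-    client_secret:
-    - <your-client-secret>
-    redirect_uri:
-    - http://192.0.2.8.xip.io
-type: Opaque
----
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: oidc-auth
-configFrom:
-  secretKeyRef:
-    name: oidc-auth-conf
-    key: oidc-auth
-plugin: openid-connect
-```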
diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-rewrites.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-rewrites.md deleted file mode 100644 index cdba8d4bb91f..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-rewrites.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Rewriting hosts and paths ---- -This guide demonstrates host and path rewrites using Ingress and Service configuration. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Create a test Deployment - -To test our requests, we create an echo server Deployment, which responds to -HTTP requests with a summary of the request contents: - -```bash -$ kubectl create namespace echo -namespace/echo created -$ kubectl apply -n echo -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -After completing the examples in the guide, you can clean up the example -configuration with `kubectl delete namespace echo`. - -For your actual production configuration, replace `echo` with whatever -namespace you use to run your application. - -## Create a Kubernetes service - -First, create a Kubernetes Service: - -```bash -echo " -apiVersion: v1 -kind: Service -metadata: - name: echo - namespace: echo -spec: - selector: - app: echo - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 80 -" | kubectl apply -f - -``` - -When referenced by an Ingress, this Service will create a Kong service and -upstream that uses the upstream IPs (Pod IPs) for its `Host` header and appends -request paths starting at `/`. - -## Create an Ingress to expose the service at the path `/myapp` on `example.com` - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: my-app - namespace: echo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: myapp.example.com - http: - paths: - - path: /myapp - backend: - serviceName: echo - servicePort: 80 -' | kubectl create -f - -``` - -This Ingress will create a Kong route attached to the service we created above. -It will preserve its path but honor the service's hostname, so this request: - -```bash -$ curl -svX GET http://myapp.example.com/myapp/foo --resolve myapp.example.com:80:$PROXY_IP -GET /myapp/foo HTTP/1.1 -Host: myapp.example.com -User-Agent: curl/7.70.0 -Accept: */* -``` -will appear upstream as: - -``` -GET /myapp/foo HTTP/1.1 -Host: 10.16.4.8 -User-Agent: curl/7.70.0 -Accept: */* -``` - -We'll use this same cURL command in other examples as well. - -Actual output from cURL and the echo server will be more verbose. These -examples are condensed to focus primarily on the path and Host header. 
- -Note that this default behavior uses `strip_path=false` on the route. This -differs from Kong's standard default to conform with expected ingress -controller behavior. - -## Rewriting the host - -There are two options to override the default `Host` header behavior: - -- Add the [`konghq.com/host-header` annotation][1] to your Service, which sets - the `Host` header directly: - ```bash - $ kubectl patch -n echo service echo -p '{"metadata":{"annotations":{"konghq.com/host-header":"internal.myapp.example.com"}}}' - ``` - The request upstream will now use the header from that annotation: - ``` - GET /myapp/foo HTTP/1.1 - Host: internal.myapp.example.com - User-Agent: curl/7.70.0 - Accept: */* - ``` -- Add the [`konghq.com/preserve-host` annotation][0] to your Ingress, which - sends the route/Ingress hostname: - ```bash - $ kubectl patch -n echo ingress my-app -p '{"metadata":{"annotations":{"konghq.com/preserve-host":"true"}}}' - ``` - The request upstream will now include the hostname from the Ingress rule: - ``` - GET /myapp/foo HTTP/1.1 - Host: myapp.example.com - User-Agent: curl/7.70.0 - Accept: */* - ``` - -The `preserve-host` annotation takes precedence, so if you add both annotations -above, the upstream host header will be `myapp.example.com`. - -## Rewriting the path - -There are two options to rewrite the default path handling behavior: - -- Add the [`konghq.com/strip-path` annotation][2] to your Ingress, which strips - the path component of the route/Ingress, leaving the remainder of the path at - the root: - ```bash - $ kubectl patch -n echo ingress my-app -p '{"metadata":{"annotations":{"konghq.com/strip-path":"true"}}}' - ``` - The request upstream will now only contain the path components not in the - Ingress rule: - ``` - GET /foo HTTP/1.1 - Host: 10.16.4.8 - User-Agent: curl/7.70.0 - Accept: */* - ``` -- Add the [`konghq.com/path` annotation][3] to your Service, which prepends - that value to the upstream path: - ```bash - $ kubectl patch -n echo service echo -p '{"metadata":{"annotations":{"konghq.com/path":"/api"}}}' - ``` - The request upstream will now contain a leading `/api`: - ``` - GET /api/myapp/foo HTTP/1.1 - Host: 10.16.4.8 - User-Agent: curl/7.70.0 - Accept: */* - ``` -`strip-path` and `path` can be combined together, with the `path` component -coming first. Adding both annotations above will send requests for `/api/foo`. - -[0]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcompreserve-host -[1]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcomhost-header -[2]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcomstrip-path -[3]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcompath diff --git a/app/kubernetes-ingress-controller/1.1.x/guides/using-tcpingress.md b/app/kubernetes-ingress-controller/1.1.x/guides/using-tcpingress.md deleted file mode 100644 index dd759e971f62..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/guides/using-tcpingress.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: TCPIngress with Kong ---- - -This guide walks through using TCPIngress Custom Resource -resource to expose TCP-based services running in Kubernetes to the out -side world. - -## Overview - -TCP-based Ingress means that Kong simply forwards the TCP stream to a Pod -of a Service that's running inside Kubernetes. Kong will not perform any -sort of transformations. 
- -There are two modes avaialble: -- **Port based routing**: In this mode, Kong simply proxies all traffic it - receives on a specific port to the Kubernetes Service. TCP connections are - load balanced across all the available pods of the Service. -- **SNI based routing**: In this mode, Kong accepts a TLS-encrypted stream - at the specified port and can route traffic to different services based on - the `SNI` present in the TLS handshake. Kong will also terminate the TLS - handshake and forward the TCP stream to the Kubernetes Service. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -> **Note**: This feature works with Kong versions 2.0.4 and above. - -> **Note**: This feature is available in Controller versions 0.8 and above. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Configure Kong for new ports - -First, we will configure Kong's Deployment and Service to expose two new ports -9000 and 9443. Port 9443 expects a TLS connection from the client. - -```shell -$ kubectl patch deploy -n kong ingress-kong --patch '{ - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "proxy", - "env": [ - { - "name": "KONG_STREAM_LISTEN", - "value": "0.0.0.0:9000, 0.0.0.0:9443 ssl" - } - ], - "ports": [ - { - "containerPort": 9000, - "name": "stream9000", - "protocol": "TCP" - }, - { - "containerPort": 9443, - "name": "stream9443", - "protocol": "TCP" - } - ] - } - ] - } - } - } -}' -deployment.extensions/ingress-kong patched -``` - -```shell -$ kubectl patch service -n kong kong-proxy --patch '{ - "spec": { - "ports": [ - { - "name": "stream9000", - "port": 9000, - "protocol": "TCP", - "targetPort": 9000 - }, - { - "name": "stream9443", - "port": 9443, - "protocol": "TCP", - "targetPort": 9443 - } - ] - } -}' -service/kong-proxy patched -``` - -You are free to choose other ports as well. - -## Install TCP echo service - -Next, we will install a dummy TCP service. -If you already have a TCP-based service running in your cluster, -you can use that as well. - -```shell -$ kubectl apply -f https://bit.ly/tcp-echo -deployment.apps/tcp-echo created -service/tcp-echo created -``` - -Now, we have a TCP echo service running in Kubernetes. -We will now expose this on plain-text and a TLS based port. 
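-
-Before moving on, you can optionally confirm that the proxy Service now exposes the
-two new stream ports (the namespace and Service name below assume the reference
-deployment):
-
-```bash
-# The PORT(S) column should now also list 9000 and 9443.
-$ kubectl get service -n kong kong-proxy
-```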
- -## TCP port based routing - -To expose our service to the outside world, create the following -`TCPIngress` resource: - -```shell -$ echo "apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: echo-plaintext - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - port: 9000 - backend: - serviceName: tcp-echo - servicePort: 2701 -" | kubectl apply -f - -tcpingress.configuration.konghq.com/echo-plaintext created -``` - -Here we are instructing Kong to forward all traffic it receives on port -9000 to `tcp-echo` service on port 2701. - -Once created, we can see the IP address at which this is available: - -```shell -$ kubectl get tcpingress -NAME ADDRESS AGE -echo-plaintext 3m18s -``` - -Lets connect to this service using `telnet`: - -```shell -$ telnet $PROXY_IP 9000 -Trying 35.247.39.83... -Connected to 35.247.39.83. -Escape character is '^]'. -Welcome, you are connected to node gke-harry-k8s-dev-pool-1-e9ebab5e-c4gw. -Running on Pod tcp-echo-844545646c-gvmkd. -In namespace default. -With IP address 10.60.1.17. -This text will be echoed back. -This text will be echoed back. -^] -telnet> Connection closed. -``` - -We can see here that the `tcp-echo` service is now available outside the -Kubernetes cluster via Kong. - -## TLS SNI based routing - -Next, we will demonstrate how Kong can help expose the `tcp-echo` service -in a secure manner to the outside world. - -Create the following TCPIngress resource: - -``` -$ echo "apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: echo-tls - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - port: 9443 - backend: - serviceName: tcp-echo - servicePort: 2701 -" | kubectl apply -f - -tcpingress.configuration.konghq.com/echo-tls created -``` - -Now, we can access the `tcp-echo` service on port 9443, on SNI `example.com`. - -You should setup a DNS record for a Domain that you control -to point to PROXY_IP and then access -the service via that for production usage. - -In our contrived demo example, we can connect to the service via TLS -using `openssl`'s `s_client` command: - -```shell -$ openssl s_client -connect $PROXY_IP:9443 -servername example.com -quiet -openssl s_client -connect 35.247.39.83:9443 -servername foo.com -quiet -depth=0 C = US, ST = California, L = San Francisco, O = Kong, OU = IT Department, CN = localhost -verify error:num=18:self signed certificate -verify return:1 -depth=0 C = US, ST = California, L = San Francisco, O = Kong, OU = IT Department, CN = localhost -verify return:1 -Welcome, you are connected to node gke-harry-k8s-dev-pool-1-e9ebab5e-c4gw. -Running on Pod tcp-echo-844545646c-gvmkd. -In namespace default. -With IP address 10.60.1.17. -This text will be echoed back. -This text will be echoed back. -^C -``` - -Since Kong is not configured with a TLS cert-key pair for `example.com`, Kong -is returning a self-signed default certificate, which is not trusted. -You can also see that the echo service is running as expected. - -## Bonus - -Scale the `tcp-echo` Deployment to have multiple replicas and observe how -Kong load-balances the TCP-connections between pods. - -## Conclusion - -In this guide, we see how to use Kong's TCP routing capabilities using -TCPIngress Custom Resource. This can be very useful if you have services -running inside Kubernetes that have custom protocols instead of the more -popular HTTP or gRPC protocols. 
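-
-For the bonus exercise above, one quick way to add replicas is:
-
-```bash
-$ kubectl scale deployment tcp-echo --replicas=3
-deployment.apps/tcp-echo scaled
-```
-
-Repeated `telnet` connections should then be answered by different Pods, which you
-can see in the greeting line printed by the echo service.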
diff --git a/app/kubernetes-ingress-controller/1.1.x/index.md b/app/kubernetes-ingress-controller/1.1.x/index.md deleted file mode 100644 index 34bbdc768d68..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Kong Ingress Controller ---- - -## Concepts - -### Architecture - -The [design][design] document explains how the {{site.kic_product_name}} works -inside a Kubernetes cluster and configures Kong to proxy traffic as per -rules defined in the Ingress resources. - -### Custom Resources - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, the `KongIngress` Custom resource is used as an -"extension" to the existing Ingress API. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -Please refer to [custom resources][crd] concept document for more details. - -### Deployment Methods - -The {{site.kic_product_name}} can be deployed in a variety of deployment patterns. -Please refer to the [deployment](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/) documentation, -which explains all the components -involved and different ways of deploying them based on the use-case. - -### High-availability and Scaling - -The {{site.kic_product_name}} is designed to scale with your traffic -and infrastructure. -Please refer to [this document](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/) to understand -failures scenarios, recovery methods, as well as scaling considerations. - -### Ingress classes - -[Ingress classes](/kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes) filter which resources the -controller loads. They ensure that {{site.kic_product_name}} instances do not -load configuration intended for other instances or other ingress controllers. - -### Security - -Please refer to [this document](/kubernetes-ingress-controller/{{page.release}}/concepts/security/) to understand the -default security settings and how to further secure the Ingress Controller. - -## Guides and Tutorials - -Please browse through [guides][guides] to get started or understand how to configure -a specific setting with the {{site.kic_product_name}}. - -## Configuration Reference - -The configurations in the {{site.kic_product_name}} can be tweaked using -Custom Resources and annotations. -Please refer to the following documents detailing this process: - -- [Custom Resource Definitions](/kubernetes-ingress-controller/{{page.release}}/references/custom-resources/) -- [Annotations](/kubernetes-ingress-controller/{{page.release}}/references/annotations/) -- [CLI arguments](/kubernetes-ingress-controller/{{page.release}}/references/cli-arguments/) -- [Version compatibility matrix](/kubernetes-ingress-controller/{{page.release}}/references/version-compatibility/) -- [Plugin compatibility matrix](/kubernetes-ingress-controller/{{page.release}}/references/plugin-compatibility/) - -## FAQs - -[FAQs][faqs] will help find answers to common problems quickly. -Please feel free to open Pull Requests to contribute to the list. - -## Troubleshooting - -Please read through our [deployment guide][deployment] for a detailed -understanding of how Ingress Controller is designed and deployed -along alongside Kong. - -- [FAQs][faqs] might help as well. 
-- The [Troubleshooting][troubleshooting] guide can help
-  resolve some issues.
-  Please contribute back if you feel your experience can help
-  the larger community.
-
-[annotations]: /kubernetes-ingress-controller/{{page.release}}/references/annotations
-[crd]: /kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources
-[deployment]: /kubernetes-ingress-controller/{{page.release}}/deployment/overview
-[design]: /kubernetes-ingress-controller/{{page.release}}/concepts/design
-[faqs]: /kubernetes-ingress-controller/{{page.release}}/faq
-[troubleshooting]: /kubernetes-ingress-controller/{{page.release}}/troubleshooting
-[guides]: /kubernetes-ingress-controller/{{page.release}}/guides/overview
diff --git a/app/kubernetes-ingress-controller/1.1.x/references/annotations.md b/app/kubernetes-ingress-controller/1.1.x/references/annotations.md
deleted file mode 100644
index 3f29e830aaa6..000000000000
--- a/app/kubernetes-ingress-controller/1.1.x/references/annotations.md
+++ /dev/null
@@ -1,445 +0,0 @@
----
-title: Kong Ingress Controller annotations
----
-
-The {{site.kic_product_name}} supports the following annotations on various
-resources:
-
-## Ingress resource
-
-The following annotations are supported on Ingress resources:
-
-| Annotation name | Description |
-|-----------------|-------------|
-| REQUIRED [`kubernetes.io/ingress.class`](#kubernetesioingressclass) | Restrict the Ingress rules that Kong should satisfy |
-| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific Ingress. |
-| [`konghq.com/protocols`](#konghqcomprotocols) | Set the protocols to handle for each Ingress resource. |
-| [`konghq.com/preserve-host`](#konghqcompreserve-host) | Pass the `host` header as-is to the upstream service. |
-| [`konghq.com/strip-path`](#konghqcomstrip-path) | Strip the path defined in the Ingress resource and then forward the request to the upstream service. |
-| [`konghq.com/https-redirect-status-code`](#konghqcomhttps-redirect-status-code) | Set the HTTPS redirect status code to use when an HTTP request is received. |
-| [`konghq.com/regex-priority`](#konghqcomregex-priority) | Set the route's regex priority. |
-| [`konghq.com/methods`](#konghqcommethods) | Set the methods matched by this Ingress. |
-| [`konghq.com/snis`](#konghqcomsnis) | Set SNI criteria for routes created from this Ingress. |
-| [`konghq.com/override`](#konghqcomoverride) | Control other routing attributes via a `KongIngress` resource. |
-
-`kubernetes.io/ingress.class` is normally required, and its value should match
-the value of the `--ingress-class` controller argument ("kong" by default).
-
-Setting the `--process-classless-ingress-v1beta1` controller flag removes that requirement:
-when enabled, the controller will process Ingresses with no
-`kubernetes.io/ingress.class` annotation. Recommended best practice is to set
-the annotation and leave this flag disabled; the flag is intended for
-older configurations, as controller versions prior to 0.10 processed classless
-Ingress resources by default.
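-
-If you do need classless Ingress processing, the flag can also be supplied through
-the controller's environment, following the `CONTROLLER_<FLAG>` convention described
-in the CLI arguments reference. A minimal sketch (the container name here is
-illustrative):
-
-```yaml
-spec:
-  template:
-    spec:
-      containers:
-      - name: ingress-controller
-        env:
-        - name: CONTROLLER_PROCESS_CLASSLESS_INGRESS_V1BETA1
-          value: "true"
-```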
- -## Service resource - -Following annotations are supported on Service resources: - -| Annotation name | Description | -|-----------------|-------------| -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific Service | -| [`konghq.com/protocol`](#konghqcomprotocol) | Set protocol Kong should use to talk to a Kubernetes service | -| [`konghq.com/path`](#konghqcompath) | HTTP Path that is always prepended to each request that is forwarded to a Kubernetes service | -| [`konghq.com/client-cert`](#konghqcomclient-cert) | Client certificate and key pair Kong should use to authenticate itself to a specific Kubernetes service | -| [`konghq.com/host-header`](#konghqcomhost-header) | Set the value sent in the `Host` header when proxying requests upstream | -| [`konghq.com/override`](#konghqcomoverride) | Fine grained routing and load-balancing | -| [`ingress.kubernetes.io/service-upstream`](#ingresskubernetesioservice-upstream) | Offload load-balancing to kube-proxy or sidecar | - -## KongConsumer resource - -Following annotations are supported on KongConsumer resources: - -| Annotation name | Description | -|-----------------|-------------| -| REQUIRED [`kubernetes.io/ingress.class`](#kubernetesioingressclass) | Restrict the KongConsumers that a controller should satisfy | -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific consumer | - -`kubernetes.io/ingress.class` is normally required, and its value should match -the value of the `--ingress-class` controller argument ("kong" by default). - -Setting the `--process-classless-kong-consumer` controller flag removes that requirement: -when enabled, the controller will process KongConsumers with no -`kubernetes.io/ingress.class` annotation. Recommended best practice is to set -the annotation and leave this flag disabled; the flag is primarily intended for -older configurations, as controller versions prior to 0.10 processed classless -KongConsumer resources by default. - -## Annotations - -### kubernetes.io/ingress.class - -If you have multiple Ingress controllers in a single cluster, -you can pick one by specifying the `ingress.class` annotation. -Following is an example of -creating an Ingress with an annotation: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: test-1 - annotations: - kubernetes.io/ingress.class: "gce" -spec: - rules: - - host: example.com - http: - paths: - - path: /test1 - backend: - serviceName: echo - servicePort: 80 -``` - -This will target the GCE controller, forcing the {{site.kic_product_name}} to -ignore it. - -On the other hand, an annotation such as - -```yaml -metadata: - name: test-1 - annotations: - kubernetes.io/ingress.class: "kong" -``` - -will target the {{site.kic_product_name}}, forcing the GCE controller -to ignore it. - -The following resources _require_ this annotation by default: - -- Ingress -- KongConsumer -- TCPIngress -- KongClusterPlugin -- Secret resources with the `ca-cert` label - -You can optionally allow Ingress or KongConsumer resources with no class -annotation (by setting the `--process-classless-ingress-v1beta1` or -`--process-classless-kong-consumer` flags, respectively), though recommended -best practice is to leave these flags disabled: the flags are primarily -intended for compatibility with configuration created before this requirement -was introduced in controller 0.10. 
- -If you allow classless resources, you must take care when using multiple -controller instances in a single cluster: only one controller instance should -enable these flags to avoid different controller instances fighting over -classless resources, which will result in unexpected and unknown behavior. - -The ingress class used by the {{site.kic_product_name}} to filter Ingress -resources can be changed using the `CONTROLLER_INGRESS_CLASS` -environment variable. - -```yaml -spec: - template: - spec: - containers: - - name: kong-ingress-internal-controller - env: - - name: CONTROLLER_INGRESS_CLASS - value: kong-internal -``` - -#### Multiple unrelated {{site.kic_product_name}}s {#multiple-unrelated-controllers} - -In some deployments, one might use multiple {{site.kic_product_name}}s -in the same Kubernetes cluster -(e.g. one which serves public traffic, one which serves "internal" traffic). -For such deployments, please ensure that in addition to different -`ingress-class`, the `--election-id` is also different. - -In such deployments, `kubernetes.io/ingress.class` annotation can be used on the -following custom resources as well: - -- KongPlugin: To configure (global) plugins only in one of the Kong clusters. -- KongConsumer: To create different consumers in different Kong clusters. -- KongCredential: To create associated credentials for consumers. - -### konghq.com/plugins - -> Available since controller 0.8 - -Kong's power comes from its plugin architecture, where plugins can modify -the request and response or impose certain policies on the requests as they -are proxied to your service. - -With the {{site.kic_product_name}}, plugins can be configured by creating -`KongPlugin` Custom Resources and then associating them with an Ingress, Service, -KongConsumer or a combination of those. - -Following is an example of how to use the annotation: - -```yaml -konghq.com/plugins: high-rate-limit, docs-site-cors -``` - -Here, `high-rate-limit` and `docs-site-cors` -are the names of the KongPlugin resources which -should be to be applied to the Ingress rules defined in the -Ingress resource on which the annotation is being applied. - -This annotation can also be applied to a Service resource in Kubernetes, which -will result in the plugin being executed at Service-level in Kong, -meaning the plugin will be -executed for every request that is proxied, no matter which Route it came from. - -This annotation can also be applied to a KongConsumer resource, -which results in plugin being executed whenever the specific consumer -is accessing any of the defined APIs. - -Finally, this annotation can also be applied on a combination of the -following resources: -- **Ingress and KongConsumer** - If an Ingress resource and a KongConsumer resource share a plugin in the - `konghq.com/plugins` annotation then the plugin will be created for the - combination of those to resources in Kong. -- **Service and KongConsumer** - Same as the above case, if you would like to give a specific consumer or - client of your service some special treatment, you can do so by applying - the same annotation to both of the resources. - -Please follow the -[Using the KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource) -guide for details on how this annotation can be used. - - -### konghq.com/path - -> Available since controller 0.8 - -This annotation can be used on a Service resource only. 
-This annotation can be used to prepend an HTTP path of a request, -before the request is forwarded. - -For example, if the annotation `konghq.com/path: "/baz"` is applied to a -Kubernetes Service `billings`, then any request that is routed to the -`billings` service will be prepended with `/baz` HTTP path. If the -request contains `/foo/something` as the path, then the service will -receive an HTTP request with path set as `/baz/foo/something`. - -### konghq.com/strip-path - -> Available since controller 0.8 - -This annotation can be applied to an Ingress resource and can take two values: -- `"true"`: If set to true, the part of the path specified in the Ingress rule - will be stripped out before the request is sent to the service. - For example, if the Ingress rule has a path of `/foo` and the HTTP request - that matches the Ingress rule has the path `/foo/bar/something`, then - the request sent to the Kubernetes service will have the path - `/bar/something`. -- `"false"`: If set to false, no path manipulation is performed. - -All other values are ignored. -Please note the quotes (`"`) around the boolean value. - -Sample usage: - -```yaml -konghq.com/strip-path: "true" -``` - -### konghq.com/preserve-host - -> Available since controller 0.8 - -This annotation can be applied to an Ingress resource and can take two values: -- `"true"`: If set to true, the `host` header of the request will be sent - as is to the Service in Kubernetes. -- `"false"`: If set to false, the `host` header of the request is not preserved. - -Please note the quotes (`"`) around the boolean value. - -Sample usage: - -```yaml -konghq.com/preserve-host: "true" -``` - -### konghq.com/https-redirect-status-code - -> Available since controller 0.8 - -By default, Kong sends HTTP Status Code 426 for requests -that need to be redirected to HTTPS. -This can be changed using this annotations. -Acceptable values are: -- 301 -- 302 -- 307 -- 308 -- 426 - -Any other value will be ignored. - -Sample usage: - -```yaml -konghq.com/https-redirect-status-code: "301" -``` - -Please note the quotes (`"`) around the integer value. - -### konghq.com/regex-priority - -> Available since controller 0.9 - -Sets the `regex_priority` setting to this value on the Kong route associated -with the Ingress resource. This controls the [matching evaluation -order](/gateway/latest/reference/proxy/#evaluation-order) for regex-based -routes. It accepts any integer value. Routes are evaluated in order of highest -priority to lowest. - -Sample usage: - -```yaml -konghq.com/regex-priority: "10" -``` - -Please note the quotes (`"`) around the integer value. - -### konghq.com/methods - -> Available since controller 0.9 - -Sets the `methods` setting on the Kong route associated with the Ingress -resource. This controls which request methods will match the route. Any -uppercase alpha ASCII string is accepted, though most users will use only -[standard methods](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods). - -Sample usage: - -```yaml -konghq.com/methods: "GET,POST" -``` - -### konghq.com/snis - -> Available since controller 1.1 - -Sets the `snis` match criteria on the Kong route associated with this Ingress. -When using route-attached plugins that execute during the certificate -phase (for example, [Mutual TLS Authentication](/hub/kong-inc/mtls-auth/)), -the `snis` annotation allows route matching based on the server name -indication information sent in a client's TLS handshake. 
- -Sample usage: - -```yaml -konghq.com/snis: "foo.example.com, bar.example.com" -``` - -### konghq.com/override - -> Available since controller 0.8 - -This annotation can associate a KongIngress resource with -an Ingress or a Service resource. -It serves as a way to bridge the gap between a sparse Ingress API in Kubernetes -with fine-grained controlled using the properties of Service, Route -and Upstream entities in Kong. - -Please follow the -[Using the KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource) -guide for details on how to use this annotation. - -### konghq.com/protocol - -> Available since controller 0.8 - -This annotation can be set on a Kubernetes Service resource and indicates -the protocol that should be used by Kong to communicate with the service. -In other words, the protocol is used for communication between a -[Kong Service](/gateway/api/admin-ee/latest/#/Services/list-service/) and -a Kubernetes Service, internally in the Kubernetes cluster. - -Accepted values are: -- `http` -- `https` -- `grpc` -- `grpcs` -- `tcp` -- `tls` - -### konghq.com/protocols - -> Available since controller 0.8 - -This annotation sets the list of acceptable protocols for the all the rules -defined in the Ingress resource. -The protocols are used for communication between the -Kong and the external client/user of the Service. - -You usually want to set this annotation for the following two use-cases: -- You want to redirect HTTP traffic to HTTPS, in which case you will use - `konghq.com/protocols: "https"` -- You want to define gRPC routing, in which case you should use - `konghq.com/protocols: "grpc,grpcs"` - -### konghq.com/client-cert - -> Available since controller 0.8 - -This annotation sets the certificate and key-pair Kong should use to -authenticate itself against the upstream service, if the upstream service -is performing mutual-TLS (mTLS) authentication. - -The value of this annotation should be the name of the Kubernetes TLS Secret -resource which contains the TLS cert and key pair. - -Under the hood, the controller creates a Certificate in Kong and then -sets the -[`service.client_certificate`](/gateway/api/admin-ee/latest/#/Services/list-service/) -for the service. - -### konghq.com/host-header - -> Available since controller 0.9 - -Sets the `host_header` setting on the Kong upstream created to represent a -Kubernetes Service. By default, Kong upstreams set `Host` to the hostname or IP -address of an individual target (the Pod IP for controller-managed -configuration). This annotation overrides the default behavior and sends -the annotation value as the `Host` header value. - -If `konghq.com/preserve-host: true` is present on an Ingress (or -`route.preserve_host: true` is present in a linked KongIngress), it will take -precedence over this annotation, and requests to the application will use the -hostname in the Ingress rule. - -Sample usage: - -```yaml -konghq.com/host-header: "test.example.com" -``` - -### ingress.kubernetes.io/service-upstream - -By default, the {{site.kic_product_name}} distributes traffic amongst all the -Pods of a Kubernetes `Service` by forwarding the requests directly to -Pod IP addresses. One can choose the load-balancing strategy to use -by specifying a KongIngress resource. - -However, in some use-cases, the load-balancing should be left up -to `kube-proxy`, or a sidecar component in the case of Service Mesh deployments. 
- -Setting this annotation to a Service resource in Kubernetes will configure -the {{site.kic_product_name}} to directly forward -the traffic outbound for this Service -to the IP address of the service (usually the ClusterIP). - -`kube-proxy` can then decide how it wants to handle the request and route the -traffic accordingly. If a sidecar intercepts the traffic from the controller, -it can also route traffic as it sees fit in this case. - -Following is an example snippet you can use to configure this annotation -on a `Service` resource in Kubernetes, (please note the quotes around `true`): - -```yaml -annotations: - ingress.kubernetes.io/service-upstream: "true" -``` - -You need {{site.kic_product_name}} >= 0.6 for this annotation. diff --git a/app/kubernetes-ingress-controller/1.1.x/references/cli-arguments.md b/app/kubernetes-ingress-controller/1.1.x/references/cli-arguments.md deleted file mode 100644 index 4683b67080ae..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/references/cli-arguments.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: CLI Arguments ---- - -Various settings and configurations of the controller can be tweaked -using CLI flags. - -## Environment variables - -Each flag defined in the table below can also be configured using -an environment variable. The name of the environment variable is `CONTROLLER_` -string followed by the name of flag in uppercase. - -For example, `--ingress-class` can be configured using the following -environment variable: - -``` -CONTROLLER_INGRESS_CLASS=kong-foobar -``` - -It is recommended that all the configuration is done via environment variables -and not CLI flags. - -## Flags - -Following table describes all the flags that are available: - -| Flag | Type | Default | Description | -|-------|------|---------|-------------| -| --admission-webhook-cert-file |`string` | `/admission-webhook/tls.crt` | Path to the PEM-encoded certificate file for TLS handshake.| -| --admission-webhook-key-file |`string` | `/admission-webhook/tls.key` | Path to the PEM-encoded private key file for TLS handshake.| -| --admission-webhook-cert |`string` | none | PEM-encoded certificate string for TLS handshake.| -| --admission-webhook-key |`string` | none | PEM-encoded private key string for TLS handshake.| -| --admission-webhook-listen |`string` | `off` | The address to start admission controller on (ip:port). Setting it to 'off' disables the admission controller.| -| --anonymous-reports |`string` | `true` | Send anonymized usage data to help improve Kong.| -| --apiserver-host |`string` | none | The address of the Kubernetes API server to connect to in the format of protocol://address:port, e.g., "http://localhost:8080. 
If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted.| -| --disable-ingress-extensionsv1beta1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `extensions/v1beta1`.| -| --disable-ingress-networkingv1beta1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `networking/v1beta1`.| -| --disable-ingress-networkingv1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `networking/v1`.| -| --election-id |`string` | `ingress-controller-leader` | The name of ConfigMap (in the same namespace) to use to facilitate leader-election between multiple instances of the controller.| -| --ingress-class |`string` | `kong` | Ingress class name to use to filter Ingress and custom resources when multiple Ingress Controllers are running in the same Kubernetes cluster.| -| --kong-admin-ca-cert-file |`string` | none | Path to PEM-encoded CA certificate file to verify Kong's Admin SSL certificate.| -| --kong-admin-ca-cert |`string` | none | PEM-encoded CA certificate string to verify Kong's Admin SSL certificate.| -| --kong-admin-concurrency |`int` | `10` | Max number of concurrent requests sent to Kong's Admin API.| -| --kong-admin-filter-tag |`string` | `managed-by-ingress-controller` | The tag used to manage entities in Kong.| -| --kong-admin-header |`string` | none | Add a header (key:value) to every Admin API call, this flag can be used multiple times to specify multiple headers.| -| --kong-admin-token |`string` | none | Set the {{site.ee_product_name}} RBAC token to be used by the controller.| -| --kong-admin-tls-server-name |`string` | none | SNI name to use to verify the certificate presented by Kong in TLS.| -| --kong-admin-tls-skip-verify |`boolean` | `false` | Disable verification of TLS certificate of Kong's Admin endpoint.| -| --kong-admin-url |`string` | `http://localhost:8001` | The address of the Kong Admin URL to connect to in the format of `protocol://address:port`.| -| --kong-workspace |`string` | `default` | Workspace in {{site.ee_product_name}} to be configured.| -| --kong-custom-entities-secret |`string` | none | Secret containing custom entities to be populated in DB-less mode, takes the form `namespace/name`.| -| --log-format |`string` | `text` | Format of logs of the controller. Allowed values are `text` and `json`. | -| --log-level |`string` | `info` | Level of logging for the controller. Allowed values are `trace`, `debug`, `info`, `warn`, `error`, `fatal` and `panic`. | -| --enable-reverse-sync |`bool` | `false` | Enable reverse checks from Kong to Kubernetes. Use this option only if a human has edit access to Kong's Admin API. | -| --kubeconfig |`string` | none | Path to kubeconfig file with authorization and master location information.| -| --profiling |`boolean` | `true` | Enable profiling via web interface `host:port/debug/pprof/`. | -| --publish-service |`string` | none | The namespaces and name of the Kubernetes Service fronting the {{site.kic_product_name}} in the form of namespace/name. The controller will set the status of the Ingress resources to match the endpoints of this service. In reference deployments, this is kong/kong-proxy.| -| --publish-status-address |`string` | none | User customized address to be set in the status of ingress resources. 
The controller will set the endpoint records on the ingress using this address.|
-| --process-classless-ingress-v1beta1 |`boolean` | `false` | Toggles whether the controller processes `extensions/v1beta1` and `networking/v1beta1` Ingress resources that have no `kubernetes.io/ingress.class` annotation.|
-| --process-classless-ingress-v1 |`boolean` | `false` | Toggles whether the controller processes `networking/v1` Ingress resources that have no `kubernetes.io/ingress.class` annotation or class field.|
-| --process-classless-kong-consumer |`boolean` | `false` | Toggles whether the controller processes KongConsumer resources that have no `kubernetes.io/ingress.class` annotation.|
-| --stderrthreshold |`string` | `2` | Logs at or above this threshold go to stderr.|
-| --sync-period |`duration` | `10m` | Relist and confirm cloud resources this often.|
-| --sync-rate-limit |`float32` | `0.3` | Define the sync frequency upper limit. |
-| --update-status |`boolean` | `true` | Indicates if the ingress controller should update the Ingress status IP/hostname.|
-| --update-status-on-shutdown |`boolean` | `true` | Indicates if the ingress controller should update the Ingress status IP/hostname when the controller is being stopped.|
-| --version |`boolean` | `false` | Shows release information about the {{site.kic_product_name}}.|
-| --watch-namespace |`string` | none | Namespace to watch for Ingress and custom resources. The default value of an empty string results in the controller watching for resources in all namespaces and configuring Kong accordingly.|
-| --help |`boolean` | `false` | Shows this documentation on the CLI and exits.|
diff --git a/app/kubernetes-ingress-controller/1.1.x/references/custom-resources.md b/app/kubernetes-ingress-controller/1.1.x/references/custom-resources.md
deleted file mode 100644
index e0f7205e4fea..000000000000
--- a/app/kubernetes-ingress-controller/1.1.x/references/custom-resources.md
+++ /dev/null
@@ -1,419 +0,0 @@
----
-title: Custom Resource Definitions
----
-
-The Ingress Controller can configure Kong-specific features
-using several [Custom Resource Definitions (CRDs)][k8s-crd].
-
-The following CRDs enable users to declaratively configure all aspects of Kong:
-
-- [**KongPlugin**](#kongplugin): This resource corresponds to
-  the [Plugin][kong-plugin] entity in Kong.
-- [**KongIngress**](#kongingress): This resource provides fine-grained control
-  over all aspects of proxy behaviour like routing, load-balancing,
-  and health checking. It serves as an "extension" to the Ingress resources
-  in Kubernetes.
-- [**KongConsumer**](#kongconsumer):
-  This resource maps to the [Consumer][kong-consumer] entity in Kong.
-- [**TCPIngress**](#tcpingress):
-  This resource can configure TCP-based routing in Kong for non-HTTP
-  services running inside Kubernetes.
-- [**KongCredential (Deprecated)**](#kongcredential-deprecated):
-  This resource maps to
-  a credential (key-auth, basic-auth, jwt, hmac-auth) that is associated with
-  a specific KongConsumer.
-
-## KongPlugin
-
-This resource provides an API to configure plugins inside Kong using
-Kubernetes-style resources.
-
-Please see the [concept](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/#KongPlugin)
-document for how the resource should be used.
- -The following snippet shows the properties available in KongPlugin resource: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: - namespace: -disabled: # optionally disable the plugin in Kong -config: # configuration for the plugin - key: value -configFrom: - secretKeyRef: - name: - key: -plugin: # like key-auth, rate-limiting etc -``` - -- `config` contains a list of `key` and `value` - required to configure the plugin. - All configuration values specific to the type of plugin go in here. - Please read the documentation of the plugin being configured to set values - in here. For any plugin in Kong, anything that goes in the `config` JSON - key in the Admin API request, goes into the `config` YAML key in this resource. - Please use a valid JSON to YAML convertor and place the content under the - `config` key in the YAML above. -- `configFrom` contains a reference to a Secret and key, where the key contains - a complete JSON or YAML configuration. This should be used when the plugin - configuration contains sensitive information, such as AWS credentials in the - Lambda plugin or the client secret in the OIDC plugin. Only one of `config` - or `configFrom` may be used in a KongPlugin, not both at once. -- `plugin` field determines the name of the plugin in Kong. - This field was introduced in {{site.kic_product_name}} 0.2.0. - -**Please note:** validation of the configuration fields is left to the user -by default. It is advised to setup and use the admission validating controller -to catch user errors. - -The plugins can be associated with Ingress -or Service object in Kubernetes using `konghq.com/plugins` annotation. - -### Examples - -#### Applying a plugin to a service - -Given the following plugin: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - header_name: my-request-id - echo_downstream: true -plugin: correlation-id -``` - -It can be applied to a service by annotating like: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: myapp-service - labels: - app: myapp-service - annotations: - konghq.com/plugins: request-id -spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: myapp-service - selector: - app: myapp-service -``` - -#### Applying a plugin to an ingress - -The KongPlugin above can be applied to a specific ingress (route or routes): - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - konghq.com/plugins: request-id - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -``` - -A plugin can also be applied to a specific KongConsumer by adding -`konghq.com/plugins` annotation to the KongConsumer resource. - -Please follow the -[Using the KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource) -guide for details on how to use this resource. 
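-
-#### Applying a plugin to a consumer
-
-For completeness, here is a brief sketch (the consumer shown is hypothetical) of
-scoping the same plugin to a single consumer by annotating a KongConsumer resource:
-
-```yaml
-apiVersion: configuration.konghq.com/v1
-kind: KongConsumer
-metadata:
-  name: example-consumer
-  annotations:
-    kubernetes.io/ingress.class: kong
-    konghq.com/plugins: request-id
-username: example-user
-```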
- -#### Applying a plugin with a secret configuration - -The plugin above can be modified to store its configuration in a secret: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -configFrom: - secretKeyRef: - name: plugin-conf-secret - key: request-id -plugin: correlation-id -``` - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: plugin-conf-secret -stringData: - request-id: | - header_name: my-request-id - echo_downstream: true -type: Opaque -``` - -## KongClusterPlugin - -A `KongClusterPlugin` is same as `KongPlugin` resource. The only differences -are that it is a Kubernetes cluster-level resource instead of a namespaced -resource, and can be applied as a global plugin using labels. - -Please consult the [KongPlugin](#kongplugin) section for details. - -*Example:* - -KongClusterPlugin example: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: request-id - annotations: - kubernetes.io/ingress.class: - labels: - global: "true" # optional, if set, then the plugin will be executed - # for every request that Kong proxies - # please note the quotes around true -config: - header_name: my-request-id -configFrom: - secretKeyRef: - name: - key: - namespace: -plugin: correlation-id -``` - -As with KongPlugin, only one of `config` or `configFrom` can be used. - -Setting the label `global` to `"true"` will apply the plugin globally in Kong, -meaning it will be executed for every request that is proxied via Kong. - -## KongIngress - -Ingress resource spec in Kubernetes can define routing policies -based on HTTP Host header and paths. -While this is sufficient in most cases, -sometimes, users may want more control over routing at the Ingress level. -`KongIngress` serves as an "extension" to Ingress resource. -It is not meant as a replacement to the -`Ingress` resource in Kubernetes. - -Please read the [concept](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/#kongingress) -document for why this resource exists and how it relates to the existing -Ingress resource. - -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and -[Route][kong-route] entities in Kong related to an Ingress resource -can be modified. - -Once a `KongIngress` resource is created, it needs to be associated with -an Ingress or Service resource using the following annotation: - -```yaml -konghq.com/override: kong-ingress-resource-name -``` - -Specifically, - -- To override any properties related to health-checking, load-balancing, - or details specific to a service, add the annotation to the Kubernetes - Service that is being exposed via the Ingress API. -- To override routing configuration (like protocol or method based routing), - add the annotation to the Ingress resource. - -Please follow the -[Using the KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource) -guide for details on how to use this resource. 
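Before the full reference spec below, here is a minimal sketch of the Service-side association described above. It assumes a Service named `echo` (as used in other examples in these docs) and references the `configuration-demo` KongIngress shown in the reference spec that follows:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: echo
  annotations:
    # Apply the KongIngress named "configuration-demo" to the Kong
    # Service/Upstream generated for this Kubernetes Service.
    konghq.com/override: configuration-demo
spec:
  selector:
    app: echo        # assumed pod label
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
```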
- -For reference, the following is a complete spec for KongIngress: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: configuration-demo -upstream: - slots: 10 - hash_on: none - hash_fallback: none - healthchecks: - threshold: 25 - active: - concurrency: 10 - healthy: - http_statuses: - - 200 - - 302 - interval: 0 - successes: 0 - http_path: "/" - timeout: 1 - unhealthy: - http_failures: 0 - http_statuses: - - 429 - interval: 0 - tcp_failures: 0 - timeouts: 0 - passive: - healthy: - http_statuses: - - 200 - successes: 0 - unhealthy: - http_failures: 0 - http_statuses: - - 429 - - 503 - tcp_failures: 0 - timeouts: 0 -proxy: - protocol: http - path: / - connect_timeout: 10000 - retries: 10 - read_timeout: 10000 - write_timeout: 10000 -route: - methods: - - POST - - GET - regex_priority: 0 - strip_path: false - preserve_host: true - protocols: - - http - - https -``` - -## TCPIngress - -The Ingress resource in Kubernetes is HTTP-only. -This custom resource is modeled similar to the Ingress resource but for -TCP and TLS SNI based routing purposes: - -```yaml -apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: - namespace: - annotations: - kubernetes.io/ingress.class: -spec: - rules: - - host: - port: - backend: - serviceName: - servicePort: -``` - -If `host` is not specified, then port-based TCP routing is performed. Kong -doesn't care about the content of TCP stream in this case. - -If `host` is specified, then Kong expects the TCP stream to be TLS-encrypted -and Kong will terminate the TLS session based on the SNI. -Also note that, the port in this case should be configured with `ssl` parameter -in Kong. - -## KongConsumer - -This custom resource configures a consumer in Kong: - -The following snippet shows the field available in the resource: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: - namespace: - annotations: - kubernetes.io/ingress.class: -username: -custom_id: -``` - -An example: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: consumer-team-x - annotations: - kubernetes.io/ingress.class: kong -username: team-X -``` - -When this resource is created, a corresponding consumer entity will be -created in Kong. - -Consumers' `username` and `custom_id` values must be unique across the Kong -cluster. While KongConsumers exist in a specific Kubernetes namespace, -KongConsumers from all namespaces are combined into a single Kong -configuration, and no KongConsumers with the same `kubernetes.io/ingress.class` -may share the same `username` or `custom_id` value. - -## KongCredential (Deprecated) - -This custom resource can be used to configure a consumer specific -entities in Kong. -The resource reference the KongConsumer resource via the `consumerRef` key. - -The validation of the config object is left up to the user. 
- -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongCredential -metadata: - name: credential-team-x -consumerRef: consumer-team-x -type: key-auth -config: - key: 62eb165c070a41d5c1b58d9d3d725ca1 -``` - -The following credential types can be provisioned using the KongCredential -resource: - -- `key-auth` for [Key authentication](/hub/kong-inc/key-auth/) -- `basic-auth` for [Basic authentication](/hub/kong-inc/basic-auth/) -- `hmac-auth` for [HMAC authentication](/hub/kong-inc/hmac-auth/) -- `jwt` for [JWT based authentication](/hub/kong-inc/jwt/) -- `oauth2` for [Oauth2 Client credentials](/hub/kong-inc/oauth2/) -- `acl` for [ACL group associations](/hub/kong-inc/acl/) - -Please ensure that all fields related to the credential in Kong -are present in the definition of KongCredential's `config` section. - -Please refer to the -[using the Kong Consumer and Credential resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource) -guide for details on how to use this resource. - -[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[kong-consumer]: /gateway/api/admin-ee/latest/#/Consumers/list-consumer/ -[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin -[kong-upstream]: /gateway/api/admin-ee/latest/#/Upstreams/list-upstream/ -[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/ -[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/ diff --git a/app/kubernetes-ingress-controller/1.1.x/references/plugin-compatibility.md b/app/kubernetes-ingress-controller/1.1.x/references/plugin-compatibility.md deleted file mode 100644 index 66250f3b167f..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/references/plugin-compatibility.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Plugin Compatibility ---- - -DB-less mode is the preferred choice for controller-managed Kong and Kong -Enterprise clusters. However, not all plugins are available in DB-less mode. -Review the table below to check if a plugin you wish to use requires a -database. - -Note that some DB-less compatible plugins have some limitations or require -non-default configuration for -[compatibility](/gateway/latest/reference/db-less-and-declarative-config/#plugin-compatibility). 
- -## Kong - -| Plugin | Kong | Kong (DB-less) | -|-------------------------|-------------------------------|-------------------------------| -| acl | | | -| aws-lambda | | | -| azure-functions | | | -| basic-auth | | | -| bot-detection | | | -| correlation-id | | | -| cors | | | -| datadog | | | -| file-log | | | -| hmac-auth | | | -| http-log | | | -| ip-restriction | | | -| jwt | | | -| key-auth | | | -| oauth2 | | | -| post-function | | | -| pre-function | | | -| prometheus | | | -| proxy-cache | | | -| rate-limiting | | | -| request-termination | | | -| request-transformer | | | -| response-ratelimiting | | | -| response-transformer | | | -| syslog | | | -| tcp-log | | | -| udp-log | | | -| zipkin | | | - -## {{site.ee_product_name}} - -{% include md/enterprise/k8s-image-note.md %} - -| Plugin | Kong for Kubernetes with {{site.ee_product_name}} | Kong for Kubernetes Enterprise | -|----------------------------------|--------------------------------------------|-------------------------------------------| -| acl | | | -| aws-lambda | | | -| azure-functions | | | -| basic-auth | | | -| bot-detection | | | -| correlation-id | | | -| cors | | | -| datadog | | | -| file-log | | | -| hmac-auth | | | -| http-log | | | -| ip-restriction | | | -| jwt | | | -| key-auth | | | -| oauth2 | | | -| post-function | | | -| pre-function | | | -| prometheus | | | -| proxy-cache | | | -| rate-limiting | | | -| request-termination | | | -| request-transformer | | | -| response-ratelimiting | | | -| response-transformer | | | -| syslog | | | -| tcp-log | | | -| udp-log | | | -| zipkin | | | -| application-registration | | 1 | -| canary release | | | -| collector | | | -| degraphql | | | -| exit-transformer | | | -| forward-proxy | | | -| graphql-proxy-cache-advanced | | | -| graphql-rate-limiting-advanced | | | -| jwt-signer | | | -| kafka-log | | | -| kafka-upstream | | | -| key-auth-enc | | | -| ldap-auth-advanced | | | -| mtls-auth | | | -| oauth2-introspection | | | -| openid-connect | | | -| proxy-cache-advanced | | | -| rate-limiting-advanced | | | -| request-transformer-advanced | | 2 | -| request-validator | | | -| response-transformer-advanced | | | -| route-transformer-advanced | | | -| statsd-advanced | | 3 | -| vault-auth | | | - -1 Only used with Dev Portal - -2 request-transformer now has feature parity with - request-transformer-advanced. request-transformer-advanced remains only for - compatibility with existing configurations. - -3 Only used with Vitals diff --git a/app/kubernetes-ingress-controller/1.1.x/references/version-compatibility.md b/app/kubernetes-ingress-controller/1.1.x/references/version-compatibility.md deleted file mode 100644 index d8395932d47d..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/references/version-compatibility.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Version Compatibility ---- - -Kong's Kubernetes ingress controller is compatible with different flavors of Kong. -The following sections detail on compatibility between versions. - -## Kong - -By Kong, we are here referring to the official distribution of the Open-Source -{{site.base_gateway}}. 
- -| {{site.kic_product_name}} | <= 0.0.4 | 0.0.5 | 0.1.x | 0.2.x | 0.3.x | 0.4.x | 0.5.x | 0.6.x | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | 1.1.x | -|:------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kong 0.13.x | | | | | | | | | | | | | | | -| Kong 0.14.x | | | | | | | | | | | | | | | -| Kong 1.0.x | | | | | | | | | | | | | | | -| Kong 1.1.x | | | | | | | | | | | | | | | -| Kong 1.2.x | | | | | | | | | | | | | | | -| Kong 1.3.x | | | | | | | | | | | | | | | -| Kong 1.4.x | | | | | | | | | | | | | | | -| Kong 1.5.x | | | | | | | | | | | | | | | -| Kong 2.0.x | | | | | | | | | | | | | | | -| Kong 2.1.x | | | | | | | | | | | | | | | -| Kong 2.2.x | | | | | | | | | | | | | | | -| Kong 2.3.x | | | | | | | | | | | | | | | - -## Kong-enterprise-k8s - -{% include md/enterprise/k8s-image-note.md %} - -Kong-enterprise-k8s is an official distribution by Kong, Inc. which bundles -all enterprise plugins into Open-Source {{site.base_gateway}}. - -The compatibility for this distribution will largely follow that of the -Open-Source {{site.base_gateway}} compatibility (the previous section). - -| {{site.kic_product_name}} | 0.6.2+ | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | 1.1.x | -|:----------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kong-enterprise-k8s 1.3.x.y | | | | | | | | -| Kong-enterprise-k8s 1.4.x.y | | | | | | | | -| Kong-enterprise-k8s 2.0.x.y | | | | | | | | - -## {{site.ee_product_name}} - -{{site.ee_product_name}} is the official enterprise distribution, which includes all -other enterprise functionality, built on top of the Open-Source {{site.base_gateway}}. 
- -| {{site.kic_product_name}} | 0.0.5 | 0.1.x | 0.2.x | 0.3.x | 0.4.x | 0.5.x | 0.6.x | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | 1.1.x | -|:------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| {{site.ee_product_name}} 0.32-x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.33-x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.34-x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.35-x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.36-x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 1.3.x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 1.5.x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.1.x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.2.x | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.3.x | | | | | | | | | | | | | | - -## Kubernetes - -| {{site.kic_product_name}} | 0.9.x | 0.10.x | 1.0.x | 1.1.x | -|:--------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kubernetes 1.13 | | | | | -| Kubernetes 1.14 | | | | | -| Kubernetes 1.15 | | | | | -| Kubernetes 1.16 | | | | | -| Kubernetes 1.17 | | | | | -| Kubernetes 1.18 | | | | | -| Kubernetes 1.19 | | | | | -| Kubernetes 1.20 | | | | | -| Kubernetes 1.21 | | | | | diff --git a/app/kubernetes-ingress-controller/1.1.x/troubleshooting.md b/app/kubernetes-ingress-controller/1.1.x/troubleshooting.md deleted file mode 100644 index 214e983aeaad..000000000000 --- a/app/kubernetes-ingress-controller/1.1.x/troubleshooting.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Debug & Troubleshooting ---- - -## Debug - -Using the flag `--v=XX` it is possible to increase the level of logging. -In particular: - -- `--v=3` shows details about the service, Ingress rule, and endpoint changes - -## Authentication to the Kubernetes API Server - -A number of components are involved in the authentication process and the first step is to narrow -down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file. -Both authentications must work: - -```text -+-------------+ service +------------+ -| | authentication | | -+ apiserver +<-------------------+ ingress | -| | | controller | -+-------------+ +------------+ - -``` - -## Service authentication - -The Ingress controller needs information from API server to configure Kong. -Therefore, authentication is required, which can be achieved in two different ways: - -1. **Service Account**: This is recommended - because nothing has to be configured. - The Ingress controller will use information provided by the system - to communicate with the API server. - See 'Service Account' section for details. -1. **Kubeconfig file**: In some Kubernetes environments - service accounts are not available. - In this case, a manual configuration is required. - The Ingress controller binary can be started with the `--kubeconfig` flag. - The value of the flag is a path to a file specifying how - to connect to the API server. Using the `--kubeconfig` - does not require the flag `--apiserver-host`. 
- The format of the file is identical to `~/.kube/config` - which is used by `kubectl` to connect to the API server. - See 'kubeconfig' section for details. - -## Discovering API-server - -Using this flag `--apiserver-host=http://localhost:8080`, -it is possible to specify an unsecured API server or -reach a remote Kubernetes cluster using -[kubectl proxy](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/#using-kubectl-proxy). -Please do not use this approach in production. - -In the diagram below you can see the full authentication flow with all options, starting with the browser -on the lower left hand side. - -```text - -Kubernetes Workstation -+---------------------------------------------------+ +------------------+ -| | | | -| +-----------+ apiserver +------------+ | | +------------+ | -| | | proxy | | | | | | | -| | apiserver | | ingress | | | | ingress | | -| | | | controller | | | | controller | | -| | | | | | | | | | -| | | | | | | | | | -| | | service account/ | | | | | | | -| | | kubeconfig | | | | | | | -| | +<-------------------+ | | | | | | -| | | | | | | | | | -| +------+----+ kubeconfig +------+-----+ | | +------+-----+ | -| |<--------------------------------------------------------| | -| | | | -+---------------------------------------------------+ +------------------+ -``` - -## Service Account - -If using a service account to connect to the API server, Dashboard expects the file -`/var/run/secrets/kubernetes.io/serviceaccount/token` to be present. It provides a secret -token that is required to authenticate with the API server. - -Verify with the following commands: - -```shell -# start a container that contains curl -$ kubectl run test --image=tutum/curl -- sleep 10000 - -# check that container is running -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -test-701078429-s5kca 1/1 Running 0 16s - -# check if secret exists -$ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ -ca.crt -namespace -token - -# get service IP of master -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kubernetes 10.0.0.1 443/TCP 1d - -# check base connectivity from cluster inside -$ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 -Unauthorized - -# connect using tokens -$ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token) -$ echo $TOKEN_VALUE -eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A -$ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization: Bearer $TOKEN_VALUE" https://10.0.0.1 -{ - "paths": [ - "/api", - "/api/v1", - "/apis", - "/apis/apps", - "/apis/apps/v1alpha1", - "/apis/authentication.k8s.io", - "/apis/authentication.k8s.io/v1beta1", - "/apis/authorization.k8s.io", - "/apis/authorization.k8s.io/v1beta1", - "/apis/autoscaling", - "/apis/autoscaling/v1", - "/apis/batch", - "/apis/batch/v1", - "/apis/batch/v2alpha1", - "/apis/certificates.k8s.io", - "/apis/certificates.k8s.io/v1alpha1", - "/apis/extensions", - "/apis/extensions/v1beta1", - "/apis/policy", - "/apis/policy/v1alpha1", - "/apis/rbac.authorization.k8s.io", - "/apis/rbac.authorization.k8s.io/v1alpha1", - "/apis/storage.k8s.io", - "/apis/storage.k8s.io/v1beta1", - "/healthz", - "/healthz/ping", - "/logs", - "/metrics", - "/swaggerapi/", - "/ui/", - "/version" - ] -} -``` - -If it is not working, there are two possible reasons: - -1. The contents of the tokens are invalid. 
- Find the secret name with `kubectl get secrets --field-selector=type=kubernetes.io/service-account-token` and - delete it with `kubectl delete secret `. - It will automatically be recreated. -1. You have a non-standard Kubernetes installation - and the file containing the token may not be present. - -The API server will mount a volume containing this file, -but only if the API server is configured to use -the ServiceAccount admission controller. -If you experience this error, -verify that your API server is using the ServiceAccount admission controller. -If you are configuring the API server by hand, -you can set this with the `--admission-control` parameter. -Please note that you should use other admission controllers as well. -Before configuring this option, please read about admission controllers. - -More information: - -- [User Guide: Service Accounts](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) -- [Cluster Administrator Guide: Managing Service Accounts](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) - -## Kubeconfig - -If you want to use a kubeconfig file for authentication, -follow the deploy procedure and -add the flag `--kubeconfig=/etc/kubernetes/kubeconfig.yaml` to the deployment diff --git a/app/kubernetes-ingress-controller/1.2.x/concepts/custom-resources.md b/app/kubernetes-ingress-controller/1.2.x/concepts/custom-resources.md deleted file mode 100644 index 838984eec700..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/concepts/custom-resources.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Custom Resources ---- - -[Custom Resources][k8s-crd] in Kubernetes allow controllers -to extend Kubernetes-style -declarative APIs that are specific to certain applications. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -The {{site.kic_product_name}} uses the `configuration.konghq.com` API group -for storing configuration specific to Kong. - -The following CRDs allow users to declaratively configure all aspects of Kong: - -- [**KongIngress**](#kongingress) -- [**KongPlugin**](#kongplugin) -- [**KongClusterPlugin**](#kongclusterplugin) -- [**KongConsumer**](#kongconsumer) -- [**TCPIngress**](#tcpingress) -- [**KongCredential (Deprecated)**](#kongcredential-deprecated) - -## KongIngress - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, `KongIngress` Custom Resource is used as an -"extension" to the existing Ingress API to provide fine-grained control -over proxy behavior. -In other words, `KongIngress` works in conjunction with -the existing Ingress resource and extends it. -It is not meant as a replacement for the `Ingress` resource in Kubernetes. -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and [Route][kong-route] -entities in Kong related to an Ingress resource can be modified. - -Once a `KongIngress` resource is created, you can use the `configuration.konghq.com` -annotation to associate the `KongIngress` resource with an `Ingress` or a `Service` -resource: - -- When the annotation is added to the `Ingress` resource, the routing - configurations are updated, meaning all routes associated with the annotated - `Ingress` are updated to use the values defined in the `KongIngress`'s route - section. 
-- When the annotation is added to a `Service` resource in Kubernetes, - the corresponding `Service` and `Upstream` in Kong are updated to use the - `proxy` and `upstream` blocks as defined in the associated - `KongIngress` resource. - -The below diagram shows how the resources are linked -with one another: - -![Associating Kong Ingress](/assets/images/products/kubernetes-ingress-controller/kong-ingress-association.png "Associating Kong Ingress") - -## KongPlugin - -Kong is designed around an extensible [plugin][kong-plugin] -architecture and comes with a -wide variety of plugins already bundled inside it. -These plugins can be used to modify the request/response or impose restrictions -on the traffic. - -Once this resource is created, the resource needs to be associated with an -`Ingress`, `Service`, or `KongConsumer` resource in Kubernetes. -For more details, please read the reference documentation on `KongPlugin`. - -The below diagram shows how you can link `KongPlugin` resource to an -`Ingress`, `Service`, or `KongConsumer`: - -| | | -:-:|:-: -![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association1.png)|![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association2.png) - -## KongClusterPlugin - -_This resource requires the [`kubernetes.io/ingress.class` annotation](/kubernetes-ingress-controller/{{page.release}}/references/annotations/)._ - -KongClusterPlugin resource is exactly same as KongPlugin, except that it is a -Kubernetes cluster-level resources instead of being a namespaced resource. -This can help when the configuration of the plugin needs to be centralized -and the permissions to add/update plugin configuration rests with a different -persona than application owners. - -This resource can be associated with `Ingress`, `Service` or `KongConsumer` -and can be used in the exact same way as KongPlugin. - -A namespaced KongPlugin resource takes priority over a -KongClusterPlugin with the same name. - -## KongConsumer - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This custom resource configures `Consumers` in Kong. -Every `KongConsumer` resource in Kubernetes directly translates to a -[Consumer][kong-consumer] object in Kong. - -## TCPIngress - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This Custom Resource is used for exposing non-HTTP -and non-GRPC services running inside Kubernetes to -the outside world via Kong. This proves to be useful when -you want to use a single cloud LoadBalancer for all kinds -of traffic into your Kubernetes cluster. - -It is very similar to the Ingress resource that ships with Kubernetes. - -## KongCredential (Deprecated) - -Once a `KongConsumer` resource is created, -credentials associated with the `Consumer` can be provisioned inside Kong -using KongCredential custom resource. - -This Custom Resource has been deprecated and will be removed in a future -release. -Instead, please use secret-based credentials. 
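As a sketch of the secret-based replacement, the credential becomes a Secret with a `kongCredType` field, and the `KongConsumer` lists the Secret in its `credentials` array. The names below reuse the `consumer-team-x` consumer and the key value from the KongCredential example shown earlier in these docs:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: consumer-team-x-key-auth
type: Opaque
stringData:
  kongCredType: key-auth                   # which credential type this Secret holds
  key: 62eb165c070a41d5c1b58d9d3d725ca1    # the key-auth API key
---
apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: consumer-team-x
  annotations:
    kubernetes.io/ingress.class: kong
username: team-X
credentials:
- consumer-team-x-key-auth                 # reference to the Secret above
```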
- -[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[kong-consumer]: /gateway/api/admin-ee/latest/#/Consumers/list-consumer/ -[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin -[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/ -[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/ -[kong-upstream]: /gateway/api/admin-ee/latest/#/Upstreams/list-upstream/ diff --git a/app/kubernetes-ingress-controller/1.2.x/concepts/deployment.md b/app/kubernetes-ingress-controller/1.2.x/concepts/deployment.md deleted file mode 100644 index 3fee76eeebd9..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/concepts/deployment.md +++ /dev/null @@ -1,302 +0,0 @@ ---- -title: Kong Ingress Controller Deployment ---- - -The {{site.kic_product_name}} is designed to be deployed in a variety of ways -based on uses-cases. This document explains various components involved -and choices one can make as per the specific use-case. - -- [**Kubernetes Resources**](#kubernetes-resources): - Various Kubernetes resources required to run the {{site.kic_product_name}}. -- [**Deployment options**](#deployment-options): - A high-level explanation of choices that one should consider and customize - the deployment to best serve a specific use case. - -## Kubernetes Resources - -The following resources are used to run the {{site.kic_product_name}}: - -- [Namespace](#namespace) -- [Custom resources](#custom-resources) -- [RBAC permissions](#rbac-permissions) -- [Ingress Controller Deployment](#ingress-controller-deployment) -- [Kong Proxy service](#kong-proxy-service) -- [Database deployment and migrations](#database-deployment-and-migration) - -These resources are created if the reference deployment manifests are used -to deploy the {{site.kic_product_name}}. -The resources are explained below for users to gain an understanding of how -they are used, so that they can be tweaked as necessary for a specific use-case. - -### Namespace - -> optional - -The {{site.kic_product_name}} can be deployed in any [namespace][k8s-namespace]. -If {{site.kic_product_name}} is being used to proxy traffic for all namespaces -in a Kubernetes cluster, which is generally the case, -it is recommended that it is installed in a dedicated -`kong` namespace but it is not required to do so. - -The example deployments present in this repository automatically create a `kong` -namespace and deploy resources into that namespace. - -### Custom Resources - -> required - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, custom resources are used as an -"extension" to the existing Ingress API. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -Please refer to the [custom resources](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/) -concept document for details. - -### RBAC permissions - -> required - -The {{site.kic_product_name}} communicates with the Kubernetes API-server and -dynamically configures Kong to automatically load balance across pods -of a service as any service is scaled in our out. - -For this reason, it requires RBAC permissions to access resources stored -in Kubernetes object store. 
- -It needs read permissions (get,list,watch) -on the following Kubernetes resources: - -- Endpoints -- Nodes -- Pods -- Secrets -- Ingress -- KongPlugins -- KongConsumers -- KongCredentials -- KongIngress - -By default, the controller listens for events and above resources across -all namespaces and will need access to these resources at the cluster level -(using `ClusterRole` and `ClusterRoleBinding`). - -In addition to these, it needs: - -- Create a ConfigMap and read and update ConfigMap for to facilitate - leader-election. Please read this [document](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/) - for more details. -- Update permission on the Ingress resource to update the status of - the Ingress resource. - -If the Ingress Controller is listening for events on a single namespace, -these permissions can be updated to restrict these permissions to a specific -namespace using `Role` and `RoleBinding resources`. - -In addition to these, it is necessary to create a `ServiceAccount`, which -has the above permissions. The Ingress Controller Pod then has this -`ServiceAccount` association. This gives the Ingress Controller process -necessary authentication and authorization tokens to communicate with the -Kubernetes API-server. - -### Ingress Controller deployment - -> required - -Kong Ingress deployment consists of the Ingress Controller deployed alongside -Kong. The deployment will be different depending on if a database is being -used or not. - -The deployment(s) is the core which actually runs the {{site.kic_product_name}}. - -See the [database](#database) section below for details. - -### Kong Proxy service - -> required - -Once the {{site.kic_product_name}} is deployed, one service is needed to -expose Kong outside the Kubernetes cluster so that it can receive all traffic -that is destined for the cluster and route it appropriately. -`kong-proxy` is a Kubernetes service which points to the Kong pods which are -capable of proxying request traffic. This service will be usually of type -`LoadBalancer`, however it is not required to be such. -The IP address of this service should be used to configure DNS records -of all the domains that Kong should be proxying, to route the traffic to Kong. - -### Database deployment and migration - -> optional - -The {{site.kic_product_name}} can run with or without a database. -If a database is being deployed, then following resources are required: - -- A `StatefulSet` which runs a PostgreSQL pod backed with a `PersistenceVolume` - to store Kong's configuration. -- An internal `Service` which resolves to the PostgreSQL pod. This ensures - that Kong can find the PostgreSQL instance using DNS inside - the Kubernetes cluster. -- A batch `Job` to run schema migrations. This is required to be executed once - to install bootstrap Kong's database schema. - Please note that on an any upgrade for Kong version, another `Job` will - need to be created if the newer version contains any migrations. - -To figure out if you should be using a database or not, please refer to the -[database](#database) section below. 
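To make the `kong-proxy` Service described above concrete, the following is a minimal sketch of a `LoadBalancer` Service. The selector and namespace are assumptions that must match your actual Kong Deployment; ports 8000 and 8443 are Kong's default proxy listen ports:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: kong-proxy
  namespace: kong
spec:
  type: LoadBalancer
  selector:
    app: kong            # assumed label on the Kong proxy pods
  ports:
  - name: proxy
    port: 80
    targetPort: 8000     # Kong's default HTTP proxy port
    protocol: TCP
  - name: proxy-ssl
    port: 443
    targetPort: 8443     # Kong's default HTTPS proxy port
    protocol: TCP
```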
-
-## Deployment options
-
-The following are the different options to consider while deploying the
-{{site.kic_product_name}} for your specific use case:
-
-- [**Kubernetes Service Type**](#kubernetes-service-types):
-  Choose between a Load Balancer and a NodePort
-- [**Database**](#database):
-  Backing Kong with a database or running without a database
-- [**Multiple Ingress Controllers**](#multiple-ingress-controllers):
-  Running multiple {{site.kic_product_name}}s inside the same Kubernetes cluster
-- [**Runtime**](#runtime):
-  Using Kong or {{site.ee_product_name}} (for {{site.ee_product_name}} customers)
-
-### Kubernetes Service Types
-
-Once deployed, any Ingress Controller needs to be exposed outside the
-Kubernetes cluster to start accepting external traffic.
-In Kubernetes, the `Service` abstraction is used to expose any application
-to the rest of the cluster or outside the cluster.
-
-If your Kubernetes cluster is running in a cloud environment, where
-Load Balancers can be provisioned with relative ease, it is recommended
-that you use a Service of type `LoadBalancer` to expose Kong to the outside
-world. For the Ingress Controller to function correctly, it is also required
-that an L4 (or TCP) Load Balancer is used and not an L7 (HTTP(S)) one.
-
-If your Kubernetes cluster doesn't support a service of type `LoadBalancer`,
-then it is possible to use a service of type `NodePort`.
-
-### Database
-
-Until Kong 1.0, a database was required to run Kong.
-Kong 1.1 introduced a new mode, DB-less, in which Kong can be configured
-using a config file, removing the need for a database.
-
-It is possible to deploy and run the {{site.kic_product_name}} with or without a
-database. The choice depends on the specific use case and results in no
-loss of functionality.
-
-#### Without a database
-
-In DB-less deployments, Kong's Kubernetes ingress controller runs
-alongside Kong and dynamically configures Kong based on the changes
-it receives from the Kubernetes API server.
-
-The following figure shows what this deployment looks like:
-
-![Kong DB-less](/assets/images/products/kubernetes-ingress-controller/dbless-deployment.png "Kong DB-less architecture")
-
-In this deployment, only one Deployment is required. It is comprised of
-a Pod with two containers: a Kong container, which proxies the requests,
-and a controller container, which configures Kong.
-
-The `kong-proxy` service points to the ports of the Kong container in the
-above deployment.
-
-Since each pod contains a controller and a Kong container, scaling out
-simply requires horizontally scaling this deployment to handle more traffic
-or to add redundancy in the infrastructure.
-
-#### With a Database
-
-In a deployment where Kong is backed by a database, the deployment architecture
-is a little different.
-
-Please refer to the figure below:
-
-![Kong with a Database](/assets/images/products/kubernetes-ingress-controller/db-deployment.png "Kong with database")
-
-In this type of deployment, two kinds of deployments are created,
-separating the control and data flow:
-
-- **Control-plane**: This deployment consists of one or more pods running
-  the controller alongside
-  a Kong container, which can only configure the database. This deployment
-  does not proxy any traffic but only configures Kong. If multiple
-  replicas of this pod are running, a leader election process will ensure
-  that only one of the pods is configuring Kong's database at a time.
-- **Data-plane**: This deployment consists of pods running a
-  single Kong container which can proxy traffic based on the configuration
-  it loads from the database. This deployment should be scaled to respond
-  to changes in traffic profiles and to add redundancy to safeguard against node
-  failures.
-- **Database**: The database is used to store Kong's configuration and propagate
-  changes to all the Kong pods in the cluster. All Kong containers in the
-  cluster should be able to connect to this database.
-
-A database-driven deployment should be used if your use case requires
-dynamic creation of Consumers and/or credentials in Kong at a scale large
-enough that the consumers will not fit entirely in memory.
-
-## Multiple Ingress Controllers
-
-It is possible to run multiple instances of the {{site.kic_product_name}} or
-run a Kong {{site.kic_product_name}} alongside other Ingress Controllers inside
-the same Kubernetes cluster.
-
-There are a few different ways of accomplishing this:
-
-- Using the `kubernetes.io/ingress.class` annotation:
-  It is common to deploy Ingress Controllers at the cluster level, meaning
-  an Ingress Controller will satisfy Ingress rules created in all the namespaces
-  inside a Kubernetes cluster.
-  Use the annotation on Ingress and custom resources to segment
-  the Ingress resources between multiple Ingress Controllers.
-  **Warning!**
-  The default `kong` ingress class has special behavior: any Ingress resource
-  that is not annotated is picked up. If another Ingress Controller is the
-  default for the cluster (that is, it processes resources without any
-  `kubernetes.io/ingress.class` annotation), be aware of this behavior.
-  When you use an ingress class other than `kong`, you must set that
-  ingress class on every Kong CRD object (plugin, consumer) that you use.
-- Namespace-based isolation:
-  The {{site.kic_product_name}} supports a deployment option where it will satisfy
-  Ingress resources in a specific namespace. With this model, one can deploy
-  a controller in multiple namespaces and they will run in an isolated manner.
-- If you are using {{site.ee_product_name}}, you can run multiple Ingress Controllers
-  pointing to the same database and configuring different Workspaces inside
-  {{site.ee_product_name}}. With such a deployment, one can use either of the above
-  two approaches to segment Ingress resources into different Workspaces in
-  {{site.ee_product_name}}.
-
-## Runtime
-
-The {{site.kic_product_name}} is compatible with a variety of runtimes:
-
-### {{site.base_gateway}} (OSS)
-
-This is the [Open-Source Gateway](https://github.com/kong/kong) runtime.
-The Ingress Controller is primarily developed against releases of the
-open-source gateway.
-
-### {{site.ee_product_name}} K8S
-
-If you are a {{site.ee_product_name}} customer, you have access to two more runtimes.
-
-The first one, {{site.ee_product_name}} K8S, is a package that takes the Open-Source
-{{site.base_gateway}} and adds enterprise-only plugins to it.
-
-You simply need to deploy {{site.ee_product_name}} K8S instead of the Open-Source
-Gateway in order to take full advantage of enterprise plugins.
-
-### {{site.ee_product_name}}
-
-The {{site.kic_product_name}} is also compatible with the full-blown version of
-{{site.ee_product_name}}. This runtime ships with Kong Manager, Kong Portal, and a
-number of other enterprise-only features.
-[This doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise) provides a high-level
-overview of the architecture.
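For the annotation-based segmentation described in the Multiple Ingress Controllers section above, a sketch of an Ingress bound to a second, non-default controller instance might look like the following. The `kong-internal` class is hypothetical and must match that controller's `--ingress-class` setting:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: internal-api
  annotations:
    # Must match the --ingress-class flag of the second controller instance
    kubernetes.io/ingress.class: kong-internal
spec:
  rules:
  - http:
      paths:
      - path: /internal
        backend:
          serviceName: echo   # assumed backend Service
          servicePort: 80
```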
- -[k8s-namespace]: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ diff --git a/app/kubernetes-ingress-controller/1.2.x/concepts/design.md b/app/kubernetes-ingress-controller/1.2.x/concepts/design.md deleted file mode 100644 index 62402fec8f0b..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/concepts/design.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Kong Ingress Controller Design ---- - -## Overview - -The {{site.kic_product_name}} configures Kong -using Ingress resources created inside a Kubernetes cluster. - -The {{site.kic_product_name}} is made up of two components: - -- Kong, the core proxy that handles all the traffic -- Controller, a process that syncs the configuration from Kubernetes to Kong - -The {{site.kic_product_name}} performs more than just proxying the traffic coming -into a Kubernetes cluster. It is possible to configure plugins, -load balancing, health checking and leverage all that Kong offers in a -standalone installation. - -The following figure shows how it works: - -![high-level-design](/assets/images/products/kubernetes-ingress-controller/high-level-design.png "High Level Design") - -The Controller listens for changes happening inside the Kubernetes -cluster and updates Kong in response to those changes to correctly -proxy all the traffic. - -Kong is updated dynamically to respond to changes around scaling, -configuration changes, failures that are happening inside a Kubernetes -cluster. - -## Translation - -Kubernetes resources are mapped to Kong resources to correctly -proxy all the traffic. - -The following figure describes the mapping between Kubernetes concepts -to Kong's configuration: - -![translating k8s to kong](/assets/images/products/kubernetes-ingress-controller/k8s-to-kong.png "Translating k8s resources to Kong") - -Let's go through how Kubernetes resources are being mapped to Kong's -configuration: - -- An [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) - resource in Kubernetes defines a set of rules for proxying - traffic. These rules corresponds to the concept of Route in Kong. -- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/) - inside Kubernetes is a way to abstract an application that is - running on a set of pods. - This maps to two objects in Kong: Service and Upstream. - The service object in Kong holds the information on the protocol - to use to talk to the upstream service and various other protocol - specific settings. The Upstream object defines load balancing - and healthchecking behavior. -- Pods associated with a Service in Kubernetes map as a Target belonging - to the Upstream (the upstream corresponding to the Kubernetes - Service) in Kong. Kong load balances across the Pods of your service. - This means that all requests flowing through Kong are not directed via - kube-proxy but directly to the pod. - -For more information on how Kong works with Routes, Services, and Upstreams, -please see the [Proxy](/gateway/latest/reference/proxy/) -and [Load balancing](/gateway/latest/reference/loadbalancing/) references. 
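To make the translation concrete, here is a minimal sketch of a Service and Ingress pair with comments noting the Kong entities each one produces. The names are illustrative:

```yaml
# Translates to a Kong Service and an Upstream; the pods selected
# by this Service become Targets of that Upstream.
apiVersion: v1
kind: Service
metadata:
  name: echo
spec:
  selector:
    app: echo            # assumed pod label
  ports:
  - port: 80
    targetPort: 8080     # assumed container port
    protocol: TCP
---
# Each host/path rule translates to a Kong Route that points at the
# Kong Service generated for the echo Service above.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: echo-ingress
  annotations:
    kubernetes.io/ingress.class: kong
spec:
  rules:
  - host: example.com
    http:
      paths:
      - path: /echo
        backend:
          serviceName: echo
          servicePort: 80
```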
diff --git a/app/kubernetes-ingress-controller/1.2.x/concepts/ha-and-scaling.md b/app/kubernetes-ingress-controller/1.2.x/concepts/ha-and-scaling.md
deleted file mode 100644
index 86b6e710c9f4..000000000000
--- a/app/kubernetes-ingress-controller/1.2.x/concepts/ha-and-scaling.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-title: High-availability and Scaling
----
-
-## High availability
-
-The {{site.kic_product_name}} is designed to be reasonably easy to operate and
-highly available, meaning that when expected failures do occur, the
-Controller should be able to continue to function with the minimum possible
-service disruption.
-
-The {{site.kic_product_name}} is composed of two parts: Kong, which handles
-the requests, and the Controller, which configures Kong dynamically.
-
-Kong itself can be deployed in a highly available manner by deploying
-multiple instances (or pods). Kong nodes are stateless, meaning a Kong pod
-can be terminated and restarted at any point in time.
-
-The controller itself can be stateful or stateless, depending on whether a database
-is being used or not.
-
-If a database is not used, then the Controller and Kong are deployed as
-colocated containers in the same pod and each controller configures the Kong
-container that it is running with.
-
-For cases where a database is necessary, the Controllers can be deployed
-across multiple zones to provide redundancy. In such a case, a leader election
-process will elect one instance as a leader, which will manipulate Kong's
-configuration.
-
-### Leader election
-
-The {{site.kic_product_name}} performs a leader election when multiple
-instances of the controller are running to ensure that only a single Controller
-is actively pushing changes to Kong's database (when running in DB mode).
-If multiple controllers are making changes to the database, it is possible that
-the controllers step on each other's changes.
-If an instance of the controller fails, another instance that is a follower
-takes up the leadership and then continues syncing Kong's configuration from
-Kubernetes.
-
-For this reason, the Controller needs permission to create a ConfigMap.
-By default, the permission is given at the cluster level but it can be narrowed
-down to a single namespace (using Role and RoleBinding) for a stricter RBAC
-policy.
-
-It also needs permission to read and update this ConfigMap.
-This permission can be specific to the ConfigMap that is being used
-for leader-election purposes.
-The name of the ConfigMap is derived from the value of the election-id CLI flag
-(default: `ingress-controller-leader`) and the
-ingress class (default: `kong`) as: "<election-id>-<ingress-class>".
-For example, the default ConfigMap that is used for leader election will
-be "ingress-controller-leader-kong", and it will be present in the same
-namespace that the controller is deployed in.
-
-## Scaling
-
-Kong is designed to be horizontally scalable, meaning that as traffic increases,
-multiple instances of Kong can be deployed to handle the increase in load.
-
-The configuration is either pushed into Kong directly via the Ingress
-Controller or loaded via the database. Kong containers can be considered
-stateless as the configuration is either loaded from the database (and
-cached heavily in memory) or loaded in-memory directly via a config file.
-
-One can use a `HorizontalPodAutoscaler` (HPA) based on metrics
-like CPU utilization, bandwidth being used, or total request count per second
-to dynamically scale the {{site.kic_product_name}} as the traffic profile changes.
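As a sketch of the CPU-based autoscaling mentioned above, an HPA targeting the proxy Deployment could look like the following. The Deployment name and namespace are assumptions that must match your installation:

```yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: kong-proxy-hpa
  namespace: kong
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: ingress-kong             # assumed name of the Kong proxy Deployment
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 70
```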
diff --git a/app/kubernetes-ingress-controller/1.2.x/concepts/ingress-classes.md b/app/kubernetes-ingress-controller/1.2.x/concepts/ingress-classes.md deleted file mode 100644 index 85790f0dbd66..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/concepts/ingress-classes.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Kong Ingress Controller and Ingress Class ---- - -## Introduction - -The {{site.kic_product_name}} uses ingress classes to filter Kubernetes Ingress -objects and other resources before converting them into Kong configuration. -This allows it to coexist with other ingress controllers and/or other -deployments of the {{site.kic_product_name}} in the same cluster: a -{{site.kic_product_name}} will only process configuration marked for its use. - -## Configuring the controller ingress class - -The `--ingress-class` flag (or `CONTROLLER_INGRESS_CLASS` environment variable) -specify the ingress class expected by the {{site.kic_product_name}}. By default, -it expects the `kong` class. - -## Loading resources by class - -The {{site.kic_product_name}} translates a variety of Kubernetes resources into -Kong configuration. Broadly speaking, we can separate these resources into two -categories: - -- Resources that the controller translates directly into Kong configuration. -- Resources referenced by some other resource, where the other resource is - directly translated into Kong configuration. - -For example, an Ingress is translated directly into a Kong route, and a -KongConsumer is translated directly into a -[Kong consumer](/gateway/api/admin-ee/latest/#/Consumers/list-consumer/). A Secret containing -an authentication plugin credential is _not_ translated directly: it is only -translated into Kong configuration if a KongConsumer resource references it. - -Because they create Kong configuration independent of any other resources, -directly-translated resources require an ingress class, and their class must -match the class configured for the controller. Referenced resources do not -require a class, but must be referenced by a directly-translated resource -that matches the controller. - -### Adding class information to resources - -Most resources use a [kubernetes.io/ingress-class annotation][class-annotation] -to indicate their class. There are several exceptions: - -- v1 Ingress resources have a dedicated `class` field. -- Knative Services [use the class specified][knative-class] by the - `ingress.class` key of the Knative installation's `config-network` ConfigMap. - You can optionally [override this on a per-Service basis][knative-override] - by adding a `networking.knative.dev/ingress.class` annotation to the Service. - -### Enabling support for classless resources - -Specifying a class is optional for some resources. Although specifying a class -is recommended, you can instruct the controller to process resources without a -class annotation using flags: - -- `--process-classless-ingress-v1beta1` instructs the controller to translate - v1beta1 Ingress resources with no class annotation. -- `--process-classless-kong-consumer` instructs the controller to translate - KongConsumer resources with no class annotation. - -These flags are primarily intended for compatibility with older configuration -({{site.kic_product_name}} before 0.10 had less strict class -requirements, and it was common to omit class annotations). 
If you are creating -new configuration and do not have older configuration without class -annotations, recommended best practice is to add class information to Ingress -and KongConsumer resources and not set the above flags. Doing so avoids -accidentally creating duplicate configuration in other ingress controller -instances. - -These flags do not _ignore_ `ingress.class` annotations: they allow resources -with no such annotation, but will not allow resource that have a non-matching -`ingress.class` annotation. - -## When to use a custom class - -Using the default `kong` class is fine for simpler deployments, where only one -{{site.kic_product_name}} instance is running in a cluster. Changing the class is -typical when: - -- You install multiple Kong environments in one Kubernetes cluster to handle - different types of ingress traffic, e.g. when using separate Kong instances - to handle traffic on internal and external load balancers, or deploying - different types of non-production environments in a single test cluster. -- You install multiple controller instances alongside a single Kong cluster to - separate configuration into different Kong workspaces (using the - `--kong-workspace` flag) or to restrict which Kubernetes namespaces any one - controller instance has access to. - -## Legacy behavior - -This overview covers behavior in {{site.kic_product_name}} version 0.10.0 onward. -Earlier versions had a special case for the default class and a bug affecting -custom classes: - -- When using the default `kong` class, the controller would always process - classless resources in addition to `kong`-class resources. When using a - non-default controller class, the controller would only process resources - with that class, not classless resources. Although this was by design, it was - a source of user confusion. -- When using a custom controller class, some resources that should not have - required a class (because they were referenced by other resources) - effectively did require a class: while these resources were loaded initially, - the controller would not track updates to them unless they had a class - annotation. - -In versions 0.10.0+ you must instruct the controller to load classless -resources, which is allowed (but not recommended) for either the default or -custom classes. Resources referenced by another resource are always loaded and -updated correctly regardless of which class you set on the controller; you do -not need to add class annotations to these resources when using a custom class. - -## Examples - -Typical configurations will include a mix of resources that have class -information and resources that are referenced by them. 
For example, consider -the following configuration for authenticating a request, using a KongConsumer, -credential Secret, Ingress, and KongPlugin (a Service is implied, but not -shown): - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: dyadya-styopa - annotations: - kubernetes.io/ingress.class: "kong" -username: styopa -credentials: -- styopa-key - ---- - -kind: Secret -apiVersion: v1 -stringData: - key: bylkogdatomoryakom - kongCredType: key-auth -metadata: - name: styopa-key - ---- - -kind: Ingress -apiVersion: extensions/v1beta1 -metadata: - name: ktonezhnaet - annotations: - kubernetes.io/ingress.class: "kong" - konghq.com/plugins: "key-auth-example" -spec: - rules: - - http: - paths: - - path: /vsemznakom - backend: - serviceName: httpbin - servicePort: 80 - ---- - -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: key-auth-example -plugin: key-auth -``` - -The KongConsumer and Ingress resources both have class annotations, as they are -resources that the controller uses as a basis for building Kong configuration. -The Secret and KongPlugin _do not_ have class annotations, as they are -referenced by other resources that do. - -[class-annotation]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#kubernetesioingressclass -[knative-class]: /kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/#ingress-class -[knative-override]: https://knative.dev/docs/serving/services/ingress-class/ diff --git a/app/kubernetes-ingress-controller/1.2.x/concepts/ingress-versions.md b/app/kubernetes-ingress-controller/1.2.x/concepts/ingress-versions.md deleted file mode 100644 index 456959b77e5f..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/concepts/ingress-versions.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: Ingress v1 and v1beta1 Differences ---- - -## Introduction - -Kubernetes 1.19 introduced a new `networking.k8s.io/v1` API for the [Ingress resource][kubernetes-ingress-doc]. -It standardizes common practices and clarifies implementation requirements that -were previously up to individual controller vendors. This document covers those -changes as they relate to {{site.kic_product_name}} and provides sample -equivalent `networking.k8s.io/v1beta1` and `networking.k8s.io/v1` resources for comparison. - -## Paths - -Both Ingress v1beta1 and v1 HTTP rules require a path, which represents a [URI -path][uri-rfc-paths]. Although v1beta1 had specified that paths were [POSIX -regular expressions][posix-regex] and enforced this, in practice most -controllers used other implementations that did not match the -specification. v1 seeks to reduce confusion by introducing several [path -types][path-types] and lifting restrictions on regular expression grammars used -by controllers. - -### networking.k8s.io/v1beta1 - -The controller passes paths directly to Kong and relies on its [path handling -logic][kong-paths]. The Kong proxy treats paths as a prefix unless they include -characters [not allowed in RFC 3986 paths][uri-rfc-paths], in which case the -proxy assumes they are a regular expression, and does not treat slashes as -special characters. For example, the prefix `/foo` can match any of the -following: - -``` -/foo -/foo/ -/foobar -/foo/bar -``` - -### networking.k8s.io/v1 - -Although v1 Ingresses provide path types with more clearly-defined logic, the -controller must still create Kong routes and work within the Kong proxy's -routing logic. 
As such, the controller translates Ingress rule paths to create -Kong routes that match one of the following specifications: `Exact`, `Prefix`, or `ImplementationSpecific`. - -#### Exact - -If `pathType` is `Exact`, the controller creates a Kong route with a regular -expression that matches the rule path only. For example, an exact rule for `/foo` in an -Ingress translates to a Kong route with a `/foo$` regular expression path. - -#### Prefix - -If `pathType` is `Prefix`, the controller creates a Kong route with two path -criteria. For example, `/foo` will create a route with a `/foo$` regular expression and -`/foo/` plain path. - -#### ImplementationSpecific - -The controller leaves `ImplementationSpecific` path rules entirely up to the Kong -router. It creates a route with the exact same path string as the Ingress rule. - -{:.important} -> Both `Prefix` and `Exact` paths modify the paths you -provide, and those modifications may interfere with user-provided regular -expressions. If you are using your own regular expressions in paths, use -`ImplementationSpecific` to avoid unexpected behavior. -## Ingress class - -[Ingress class][ingress-class] indicates which resources an ingress controller -should process. It provides a means to separate out configuration intended for -other controllers or other instances of the {{site.kic_product_name}}. - -In v1beta1, ingress class was handled informally using -`kubernetes.io/ingress.class` [annotations][deprecated-annotation]. v1 -introduces a new [IngressClass resource][ingress-class-api] which provides -richer information about the controller. v1 Ingresses are bound to a class via -their `ingressClassName` field. - -For example, consider this v1beta1 Ingress: - -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: example-ingress - annotations: - kubernetes.io/ingress.class: "kong" -spec: - rules: - - host: example.com - http: - paths: - - path: /test - backend: - serviceName: echo - servicePort: 80 -``` - -Its ingress class annotation is set to `kong`, and ingress controllers set to -process `kong` class Ingresses will process it. - -In v1, the equivalent configuration declares a `kong` IngressClass resource -whose `metadata.name` field indicates the class name. The `ingressClassName` -value of the Ingress object must match the value of the `name` field in the -IngressClass metadata: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - name: kong -spec: - controller: ingress-controllers.konghq.com/kong ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: example-ingress -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /testpath - pathType: Prefix - backend: - service: - name: test - port: - number: 80 -``` - -## Hostnames - -Ingress v1 formally codifies support for [wildcard hostnames][wildcard-hostnames]. -v1beta1 Ingresses did not reject wildcard hostnames, however, and Kong had -[existing support for them][kong-wildcard-hostnames]. - -As such, while the v1beta1 specification did not officially support wildcard -hostnames, you can use wildcard hostnames with either version. Setting a -hostname like `*.example.com` will match requests for both `foo.example.com` -and `bar.example.com` with either v1 or v1beta1 Ingresses. - -## Backend types - -Ingress v1 introduces support for backends other than Kubernetes Services through -[resource backends][resource-backends]. 
- -Kong does not support any dedicated resource backend configurations, though it -does have support for Routes without Services in some cases (for example, when -using the [AWS Lambda plugin][lambda-plugin]). For these routes, you should -create a placeholder Kubernetes Service for them, using an [ExternalName -Service][external-name] with an RFC 2606 invalid hostname, e.g. -`kong.invalid`. You can use these placeholder services with either v1 or -v1beta1 Ingresses. - -[kubernetes-ingress-doc]: https://kubernetes.io/docs/concepts/services-networking/ingress/ -[ingress-class]: /kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes -[uri-rfc-paths]: https://tools.ietf.org/html/rfc3986#section-3.3 -[posix-regex]: https://www.boost.org/doc/libs/1_38_0/libs/regex/doc/html/boost_regex/syntax/basic_extended.html -[path-types]: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types -[kong-paths]: /gateway/latest/reference/proxy/#request-path -[wildcard-hostnames]: https://kubernetes.io/docs/concepts/services-networking/ingress/#hostname-wildcards -[kong-wildcard-hostnames]: /gateway/latest/reference/proxy/#using-wildcard-hostnames -[resource-backends]: https://kubernetes.io/docs/concepts/services-networking/ingress/#resource-backend -[lambda-plugin]: /hub/kong-inc/aws-lambda/ -[external-name]: https://kubernetes.io/docs/concepts/services-networking/service/#externalname -[deprecated-annotation]: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation -[ingress-class-api]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-class-v1/ diff --git a/app/kubernetes-ingress-controller/1.2.x/concepts/k4k8s-with-kong-enterprise.md b/app/kubernetes-ingress-controller/1.2.x/concepts/k4k8s-with-kong-enterprise.md deleted file mode 100644 index 4331901f398f..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/concepts/k4k8s-with-kong-enterprise.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -Kong for Kubernetes is a {{site.kic_product_name}} built on top -of Open-Source {{site.base_gateway}}. - -If you are an Enterprise customer, you have an option of running the -[Enterprise version](/gateway/latest/install-and-run/kubernetes/) -of the Ingress Controller, which includes -all the Enterprise plugins but does not include Kong Manager or any -other Enterprise features. This makes it possible to -run the Ingress layer without a database, providing a very low -operational and maintenance footprint. - -However, in some cases, those enterprise features are necessary, -and for such use-cases we support another deployment - Kong for -Kubernetes with {{site.ee_product_name}}. - -As seen in the diagram below, this deployment consists of -Kong for Kubernetes deployed in Kubernetes, and is hooked up with -a database. If there are services running outside Kubernetes, -a regular {{site.base_gateway}} proxy can be deployed there and connected to the -same database. This provides a single pane of visibility of -all services that are running in your infrastructure. - -![architecture-overview](/assets/images/products/kubernetes-ingress-controller/k4k8s-with-kong-enterprise.png "K4K8S with {{site.ee_product_name}}") - -In this deployment model, the database for Kong can be hosted anywhere. -It can be a managed DBaaS service like Amazon RDS, Google Cloud -SQL or a Postgres instance managed in-house or even an instance -deployed on Kubernetes. 
-If you are following this model, please keep in mind the following: -- It is recommended to not deploy Postgres on Kubernetes, - due to the fact that running stateful applications on Kubernetes - is challenging to get right. -- Ensure that you have the same image/package of {{site.ee_product_name}} - running across the fleet. This means that all Kong instances that are - connected to the same database must use the - same version of {{site.ee_product_name}} package. - -[This guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise) -walks through the setup of the above architecture. diff --git a/app/kubernetes-ingress-controller/1.2.x/concepts/security.md b/app/kubernetes-ingress-controller/1.2.x/concepts/security.md deleted file mode 100644 index bbb08cd9073b..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/concepts/security.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Security ---- - -This document explains the security aspects of the {{site.kic_product_name}}. - -The {{site.kic_product_name}} communicates with Kubernetes API-server and Kong's -Admin API. APIs on both sides offer authentication/authorization features -and the controller integrates with them gracefully. - -## Kubernetes RBAC - -The {{site.kic_product_name}} is deployed with RBAC permissions as explained in the -[deployment](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment) document. -It has read and list permissions on most resources but requires update -and create permission for a few resources to provide seamless integration. -The permissions can be locked down further if needed depending on the specific -use-case. -This RBAC policy is associated with a ServiceAccount and the ServiceAccount -is associated with the {{site.kic_product_name}}. -The Controller uses the ServiceAccount credential to authenticate and -authorize itself against the Kubernetes API-server. - -## Kong Admin API Protection - -Kong's Admin API is used to control configuration of Kong and proxying behavior. -If an attacker happens to gain access to Kong's Admin API, they -will be able to perform all actions as an authorized user like -modifying or deleting Kong's configuration. -Hence, it is important that the deployment -ensures that the likelihood of this happening is as small as possible. - -In the example deployments, the Controller and Kong's Admin API communicate -over the loopback (`lo`) interface of the pod. -Kong is not performing any kind of authorization or -authentication on the Admin API, hence the API is accessible only -on the loopback interface to limit the attack surface. -Although not ideal, this setup requires fewer steps -to get started and can be further hardened as required. - -Please note that it is very important that Kong's Admin API is not accessible -inside the cluster as any malicious service can change Kong's configuration. -If you're exposing Kong's Admin API itself outside the cluster, please ensure -that you have the necessary authentication in place first. - -### Authentication on Kong's Admin API - -If Kong's Admin API is protected with one of the authentication plugins, -the Controller can authenticate itself against it to add another layer of -security. -The Controller comes with support for injecting arbitrary HTTP headers -in the requests it makes to Kong's Admin API, which can be used to inject -authentication credentials. -The headers can be specified using the CLI flag `--kong-admin-header` in the Ingress -Controller. 
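For illustration, the following patch (modeled on the patch commands used elsewhere in these docs) injects such a header through the controller's environment. It is a sketch only: it assumes the stock manifest names (`ingress-kong` Deployment, `ingress-controller` container) and a key-auth plugin protecting the Admin API with its default `apikey` header, and the key value is a placeholder you would normally source from a Secret.

```bash
# Adds an Admin API header to every request the controller makes.
# CONTROLLER_KONG_ADMIN_HEADER is the environment form of --kong-admin-header.
$ kubectl patch deploy -n kong ingress-kong \
  -p '{"spec":{"template":{"spec":{"containers":[{"name":"ingress-controller","env":[{"name":"CONTROLLER_KONG_ADMIN_HEADER","value":"apikey:MY-SECRET-KEY"}]}]}}}}'
```

Once the rollout completes, the authentication layer in front of the Admin API can validate the controller's requests using that header.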
- -The Ingress Controller will support mutual-TLS-based authentication on Kong's Admin -API in future. - -### {{site.ee_product_name}} RBAC - -{{site.ee_product_name}} comes with support for authentication and authorization on -Kong's Admin API. - -Once an RBAC token is provisioned, the {{site.kic_product_name}} can use the RBAC -token to authenticate against {{site.ee_product_name}}. Use the `--kong-admin-header` CLI -flag to pass the RBAC token the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/admission-webhook.md b/app/kubernetes-ingress-controller/1.2.x/deployment/admission-webhook.md deleted file mode 100644 index 23ee8d5da9fb..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/admission-webhook.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Validating Admission Controller ---- - -The {{site.kic_product_name}} ships with an Admission Controller for KongPlugin -and KongConsumer resources in the `configuration.konghq.com` API group. - -The Admission Controller needs a TLS certificate and key pair which -you need to generate as part of the deployment. - -Following guide walks through a setup of how to create the required key-pair -and enable the admission controller. - -Please note that this requires {{site.kic_product_name}} >= 0.6 to be -already installed in the cluster. - -## tldr; - -If you are using the stock YAML manifests to install and setup Kong for -Kubernetes, then you can setup the admission webhook using a single command: - -```bash -curl -sL https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/hack/deploy-admission-controller.sh | bash - -``` - -This script takes all the following commands and packs them together. -You need `kubectl` and `openssl` installed on your workstation for this to -work. - -## Create a certificate for the admission controller - -Kuberentes API-server makes an HTTPS call to the Admission Controller to verify -if the custom resource is valid or not. For this to work, Kubernetes API-server -needs to trust the CA certificate that is used to sign Admission Controller's -TLS certificate. - -This can be accomplished either using a self-signed certificate or using -Kubernetes CA. Follow one of the steps below and then go to -[Create the secret](#create-the-secret) step below. - -Please note the `CN` field of the x509 certificate takes the form -`..svc`, which -in the default case is `kong-validation-webhook.kong.svc`. - -### Using self-signed certificate - -Use openssl to generate a self-signed certificate: - -```bash -$ openssl req -x509 -newkey rsa:2048 -keyout tls.key -out tls.crt -days 365 \ - -nodes -subj "/CN=kong-validation-webhook.kong.svc" \ - -extensions EXT -config <( \ - printf "[dn]\nCN=kong-validation-webhook.kong.svc\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:kong-validation-webhook.kong.svc\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") -Generating a 2048 bit RSA private key -..........................................................+++ -.............+++ -writing new private key to 'key.pem' -``` - -### Using in-built Kubernetes CA - -Kubernetes comes with an in-built CA which can be used to provision -a certificate for the Admission Controller. -Please refer to the -[this guide](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/) -on how to generate a certificate using the in-built CA. 
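Whichever method you choose, it can help to confirm that the resulting certificate carries the expected webhook DNS name before you create the secret. For example (output abbreviated):

```bash
# Print the certificate details and check the SAN matches the webhook service.
$ openssl x509 -in tls.crt -noout -text | grep -A1 "Subject Alternative Name"
            X509v3 Subject Alternative Name:
                DNS:kong-validation-webhook.kong.svc
```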
- -### Create the secret - -Next, create a Kubernetes secret object based on the key and certificate that -was generatd in the previous steps. -Here, we assume that the PEM-encoded certificate is stored in a file named -`tls.crt` and private key is stored in `tls.key`. - -```bash -$ kubectl create secret tls kong-validation-webhook -n kong \ - --key tls.key --cert tls.crt -secret/kong-validation-webhook created -``` - -## Update the deployment - -Once the secret is created, update the Ingress Controller deployment: - -Execute the following command to patch the {{site.kic_product_name}} deployment -to mount the certificate and key pair and also enable the admission controller: - -```bash -$ kubectl patch deploy -n kong ingress-kong \ - -p '{"spec":{"template":{"spec":{"containers":[{"name":"ingress-controller","env":[{"name":"CONTROLLER_ADMISSION_WEBHOOK_LISTEN","value":":8080"}],"volumeMounts":[{"name":"validation-webhook","mountPath":"/admission-webhook"}]}],"volumes":[{"secret":{"secretName":"kong-validation-webhook"},"name":"validation-webhook"}]}}}}' -deployment.extensions/ingress-kong patched -``` - -## Enable the validating admission - -If you are using Kubernetes CA to generate the certificate, you don't need -to supply a CA certificate (in the `caBunde` param) -as part of the Validation Webhook configuration -as the API-server already trusts the internal CA. - -```bash -$ echo "apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: kong-validations -webhooks: -- name: validations.kong.konghq.com - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: ["v1beta1"] - rules: - - apiGroups: - - configuration.konghq.com - apiVersions: - - '*' - operations: - - CREATE - - UPDATE - resources: - - kongconsumers - - kongplugins - - apiGroups: - - '' - apiVersions: - - 'v1' - operations: - - CREATE - - UPDATE - resources: - - secrets - clientConfig: - service: - namespace: kong - name: kong-validation-webhook - caBundle: $(cat tls.crt | base64 -w 0) " | kubectl apply -f - -``` - -## Verify if it works - -### Verify duplicate KongConsumers - -Create a KongConsumer with username as `harry`: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, create another KongConsumer with the same username: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry2 - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: consumer already exists -``` - -The validation webhook rejected the KongConsumer resource as there already -exists a consumer in Kong with the same username. - -### Verify incorrect KongPlugins - -Try to create the folowing KongPlugin resource. -The `foo` config property does not exist in the configuration definition and -hence the Admission Controller returns back an error. -If you remove the `foo: bar` configuration line, the plugin will be -created succesfully. 
- -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - foo: bar - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: 400 Bad Request {"fields":{"config":{"foo":"unknown field"}},"name":"schema violation","code":2,"message":"schema violation (config.foo: unknown field)"} -``` - -### Verify incorrect credential secrets - -With 0.7 and above versions of the controller, validations also take place -for incorrect secret types and wrong parameters to the secrets: - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=basic-auth \ - --from-literal=username=foo -Error from server: admission webhook "validations.kong.konghq.com" denied the request: missing required field(s): password -``` - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=wrong-auth \ - --from-literal=sdfkey=my-sooper-secret-key -Error from server: admission webhook "validations.kong.konghq.com" denied the request: invalid credential type: wrong-auth -``` diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/aks.md b/app/kubernetes-ingress-controller/1.2.x/deployment/aks.md deleted file mode 100644 index aab79c47e6cd..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/aks.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kong Ingress on Azure Kubernetes Service (AKS) ---- - -## Requirements - -1. A fully functional AKS cluster. - Please follow Azure's Guide to - [set up an AKS cluster](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the AKS Kubernetes - cluster you'll work on. The above AKS setup guide will help - you set this up. - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It will take a few minutes for all containers to start and report -healthy status. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Set up environment variables - -Next, set up an environment variable with the IP address at which -Kong is accessible. 
This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's set up an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Microsoft Azure to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/eks.md b/app/kubernetes-ingress-controller/1.2.x/deployment/eks.md deleted file mode 100644 index 209a750f4edf..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/eks.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Kong Ingress on Elastic Kubernetes Service (EKS) ---- - -## Requirements - -1. A fully functional EKS cluster. - Please follow Amazon's Guide to - [set up an EKS cluster](https://aws.amazon.com/getting-started/projects/deploy-kubernetes-app-amazon-eks/). -2. Basic understanding of Kubernetes -3. A working `kubectl` linked to the EKS Kubernetes - cluster we will work on. The above EKS setup guide will help - you set this up. - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It may take a few minutes for all containers to start and report -healthy statuses. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, create an environment variable with the IP address at which -Kong is accessible. This IP address sends requests to the -Kubernetes cluster. 
- -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 example.eu-west-1.elb.amazonaws.com 80:31929/TCP,443:31408/TCP 57d -``` - -Create an environment variable to hold the ELB hostname: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].hostname}" service -n kong kong-proxy) -``` - -> Note: It may take some time for Amazon to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## TLS configuration - -Versions of Kong prior to 2.0.0 default to using [the "modern" cipher suite -list](https://wiki.mozilla.org/Security/Server_Side_TLS). This is not -compatible with ELBs when the ELB terminates TLS at the edge and establishes a -new session with Kong. This error will appear in Kong's logs: - -``` -*7961 SSL_do_handshake() failed (SSL: error:1417A0C1:SSL routines:tls_post_process_client_hello:no shared cipher) while SSL handshaking -``` - -To correct this issue, set `KONG_SSL_CIPHER_SUITE=intermediate` in your -environment variables. diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/gke.md b/app/kubernetes-ingress-controller/1.2.x/deployment/gke.md deleted file mode 100644 index 52de2a69d458..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/gke.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Kong Ingress on Google Kubernetes Engine (GKE) ---- - -## Requirements - -1. A fully functional GKE cluster. - The easiest way to do this is to do it via the web UI: - Go to Google Cloud's console > Kubernetes Engine > Cluster > - Create a new cluster. - This documentation has been tested on a zonal cluster in - europe-west-4a using 1.10.5-gke.4 as Master version. - The default pool has been assigned 2 nodes of kind 1VCPU - with 3.75GB memory (default setting). - The OS used is COS (Container Optimized OS) and the auto-scaling - has been enabled. Default settings are being used except for - `HTTP load balancing` which has been disabled (you probably want to use - Kong features for this). For more information on GKE clusters, - refer to - [the GKE documentation](https://cloud.google.com/kubernetes-engine/docs/). -1. If you wish to use a static IP for Kong, you have to reserve a static IP - address (in Google Cloud's console > VPC network > - External IP addresses). For information, - you must create a regional IP - global is not supported as `loadBalancerIP` yet) -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the GKE Kubernetes - cluster we will work on. For information, you can associate a new `kubectl` - context by using: - - ```bash - gcloud container clusters get-credentials --zone --project - ``` - -## Update User Permissions - -> Because of [the way Kubernetes Engine checks permissions -when you create a Role or ClusterRole](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control), you must -first create a RoleBinding that grants you all of -the permissions included in the role you want to create. -An example workaround is to create a RoleBinding that -gives your Google identity a cluster-admin role -before attempting to create additional Role or -ClusterRole permissions. 
-This is a known issue in RBAC in Kubernetes and -Kubernetes Engine versions 1.6 and -later. - -A fast workaround: - -```yaml - -echo -n " -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: cluster-admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: User - name: # usually the Google account - # e.g.: example@testorg.com - namespace: kube-system" | kubectl apply -f - - -``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. -Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Google to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/k4k8s-enterprise.md b/app/kubernetes-ingress-controller/1.2.x/deployment/k4k8s-enterprise.md deleted file mode 100644 index 0e5dfae63cb9..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/k4k8s-enterprise.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Kong for Kubernetes Enterprise ---- - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. 
- -## Prerequisites - -Before we can deploy Kong, we need to satisfy one prerequisite: create a license -secret. - -To create this secret, provision the `kong` namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -Enterprise version requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -If you do not have one, please contact your sales representative. -Save the license file temporarily to disk with filename `license` -and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -## Installers - -Once the secret is in-place, we can proceed with installation. - -Kong for Kubernetes can be installed using an installer of -your choice: - -### YAML manifests - -Execute the following to install Kong for Kubernetes Enterprise using YAML -manifests: - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless-k4k8s-enterprise.yaml -``` - -It takes a few minutes the first time this setup is done. - -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-6ffcf8c447-5qv6z 2/2 Running 1 44m -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.254.78 35.233.198.16 80:32697/TCP,443:32365/TCP 22h -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP address assigned to the service. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for your cloud provider to actually associate the -IP address to the `kong-proxy` Service. - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes Enterprise: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/deploy/manifests/enterprise-k8s -``` - -You can use the above URL as a base kustomization and build on top of it -as well. 
- -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name - --namespace kong \ - -f values.yaml \ - --set ingressController.installCRDs=false -``` - -### Example values.yaml -``` -image: - repository: kong/kong-gateway - tag: 2.2.1.0-alpine -env: - LICENSE_DATA: - valueFrom: - secretKeyRef: - name: kong-enterprise-license - key: license -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes Enterprise - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/k4k8s.md b/app/kubernetes-ingress-controller/1.2.x/deployment/k4k8s.md deleted file mode 100644 index 288703cd434b..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/k4k8s.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kong for Kubernetes ---- - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -## Installers - -Kong for Kubernetes can be installed using an installer of -your choice. - -Once you've installed Kong for Kubernetes, -jump to the [next section](#using-kong-for-kubernetes) -on using it. - -### YAML manifests - -Please pick one of the following guides depending on your platform: - -- [Minikube](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/) -- [Google Kubernetes Engine(GKE) by Google](/kubernetes-ingress-controller/{{page.release}}/deployment/gke/) -- [Elastic Kubernetes Service(EKS) by Amazon](/kubernetes-ingress-controller/{{page.release}}/deployment/eks/) -- [Azure Kubernetes Service(AKS) by Microsoft](/kubernetes-ingress-controller/{{page.release}}/deployment/aks/) - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/deploy/manifests/base -``` - -You can use the above URL as a base kustomization and build on top of it -to make it suite better for your cluster and use-case. 
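As a sketch, an overlay that builds on that remote base might look like the following; the annotation is purely illustrative, and you should pin the base to a release ref in practice.

```yaml
# kustomization.yaml -- illustrative overlay on the remote base shown above.
# Depending on your kustomize version, remote bases may need to be listed
# under `bases:` instead of `resources:`.
resources:
- github.com/kong/kubernetes-ingress-controller/deploy/manifests/base

# Hypothetical customization: annotate everything the base creates.
commonAnnotations:
  team: platform
```

Build and apply the overlay with `kustomize build . | kubectl apply -f -`.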
- -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes - -Once you've installed Kong for Kubernetes, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/kong-enterprise.md b/app/kubernetes-ingress-controller/1.2.x/deployment/kong-enterprise.md deleted file mode 100644 index e2f7648a283c..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/kong-enterprise.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -This guide walks through setting up the {{site.kic_product_name}} using Kong -Enterprise. This architecture is described in detail in [this doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise/). - -We assume that we start from scratch and you don't have {{site.ee_product_name}} -deployed. For the sake of simplicity, we will deploy {{site.ee_product_name}} and -its database in Kubernetes itself. You can safely run them outside -Kubernetes as well. - -## Prerequisites - -Before we can deploy the {{site.kic_product_name}} with {{site.ee_product_name}}, -we need to satisfy the following prerequisites: -- [{{site.ee_product_name}} License secret](#kong-enterprise-license-secret) -- [{{site.ee_product_name}} bootstrap password](#kong-enterprise-bootstrap-password) - -In order to create these secrets, let's provision the `kong` -namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -{{site.ee_product_name}} requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -Save the license file temporarily to disk and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -### {{site.ee_product_name}} bootstrap password - -Next, we need to create a secret containing the password using which we can login into Kong Manager. -Please replace `cloudnative` with a random password of your choice and note it down. - -```bash -$ kubectl create secret generic kong-enterprise-superuser-password -n kong --from-literal=password=cloudnative -secret/kong-enterprise-superuser-password created -``` - -Once these are created, we are ready to deploy {{site.ee_product_name}} -Ingress Controller. 
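Before installing, you can confirm that both secrets exist in the `kong` namespace (output trimmed; ages and any additional secrets will differ in your cluster):

```bash
$ kubectl get secrets -n kong
NAME                                 TYPE     DATA   AGE
kong-enterprise-license              Opaque   1      30s
kong-enterprise-superuser-password   Opaque   1      10s
```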
- -## Install - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/single/all-in-one-postgres-enterprise.yaml -``` - -It takes a little while to bootstrap the database. -Once bootstrapped, you should see the {{site.kic_product_name}} running with -{{site.ee_product_name}} as its core: - -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-548b9cff98-n44zj 2/2 Running 0 21s -kong-migrations-pzrzz 0/1 Completed 0 4m3s -postgres-0 1/1 Running 0 4m3s -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-admin LoadBalancer 10.63.255.85 34.83.95.105 80:30574/TCP 4m35s -kong-manager LoadBalancer 10.63.247.16 34.83.242.237 80:31045/TCP 4m34s -kong-proxy LoadBalancer 10.63.242.31 35.230.122.13 80:32006/TCP,443:32007/TCP 4m34s -kong-validation-webhook ClusterIP 10.63.240.154 443/TCP 4m34s -postgres ClusterIP 10.63.241.104 5432/TCP 4m34s - -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP assigned to the three LoadBalancer type services. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. If you are running Minikube, you will not get an -external IP address. - -### Setup Kong Manager - -Next, if you browse to the IP address or host of the `kong-manager` service in your Browser, -which in our case is `http://34.83.242.237`. -Kong Manager should load in your browser. -Try logging in to the Manager with the username `kong_admin` -and the password you supplied in the prerequisite, it should fail. -The reason being we've not yet told Kong Manager where it can find the Admin API. - -Let's set that up. We will take the External IP address of `kong-admin` service and -set the environment variable `KONG_ADMIN_API_URI`: - -```bash -KONG_ADMIN_IP=$(kubectl get svc -n kong kong-admin --output=jsonpath='{.status.loadBalancer.ingress[0].ip}') -kubectl patch deployment -n kong ingress-kong -p "{\"spec\": { \"template\" : { \"spec\" : {\"containers\":[{\"name\":\"proxy\",\"env\": [{ \"name\" : \"KONG_ADMIN_API_URI\", \"value\": \"${KONG_ADMIN_IP}\" }]}]}}}}" -``` - -It will take a few minutes to roll out the updated deployment and once the new -`ingress-kong` pod is up and running, you should be able to log into the Kong Manager UI. - -As you follow along with other guides on how to use your newly deployed the {{site.kic_product_name}}, -you will be able to browse Kong Manager and see changes reflected in the UI as Kong's -configuration changes. - -## Using Kong for Kubernetes with {{site.ee_product_name}} - -Let's setup an environment variable to hold the IP address of `kong-proxy` service: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. - -## Customizing by use-case - -The deployment in this guide is a point to start using Ingress Controller. -Based on your existing architecture, this deployment will require custom -work to make sure that it needs all of your requirements. - -In this guide, there are three load-balancers deployed for each of -Kong Proxy, Kong Admin and Kong Manager services. 
It is possible and -recommended to instead have a single Load balancer and then use DNS names -and Ingress resources to expose the Admin and Manager services outside -the cluster. diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/minikube.md b/app/kubernetes-ingress-controller/1.2.x/deployment/minikube.md deleted file mode 100644 index d56a366bb0bd..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/minikube.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Kong Ingress on Minikube ---- - -## Setup Minikube - -1. Install [`minikube`](https://github.com/kubernetes/minikube) - - Minikube is a tool that makes it easy to run Kubernetes locally. - Minikube runs a single-node Kubernetes cluster inside a VM on your laptop - for users looking to try out Kubernetes or develop with it day-to-day. - -1. Start `minikube` - - ```bash - minikube start - ``` - - It will take a few minutes to get all resources provisioned. - - ```bash - kubectl get nodes - ``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. -Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -> Note: this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -```bash -$ export PROXY_IP=$(minikube service -n kong kong-proxy --url | head -1) -# If installed by helm, service name would be "-kong-proxy". -# $ export PROXY_IP=$(minikube service -kong-proxy --url | head -1) -$ echo $PROXY_IP -http://192.168.99.100:32728 -``` - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. 
diff --git a/app/kubernetes-ingress-controller/1.2.x/deployment/overview.md b/app/kubernetes-ingress-controller/1.2.x/deployment/overview.md deleted file mode 100644 index 5a37579b5a99..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/deployment/overview.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Installing and Configuring ---- - -## Getting started - -If you are getting started with Kong for Kubernetes, -install it on Minikube using our Minikube [setup guide](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/). - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## Overview - -The {{site.kic_product_name}} can be installed on a local, managed -or any Kubernetes cluster which supports a service of type `LoadBalancer`. - -As explained in the [deployment document](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/), there -are a variety of configurations and runtimes for the {{site.kic_product_name}}. - -The following sections detail on deployment steps for all the different -runtimes: - -## Kong for Kubernetes - - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s/) to deploy Kong for Kubernetes -using an installation method of your choice. - -## Kong for Kubernetes Enterprise - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) to deploy Kong for Kubernetes -Enterprise if you have purchased or are trying out {{site.ee_product_name}}. - -## Kong for Kubernetes with {{site.ee_product_name}} - -Kong for Kubernetes can integrate with {{site.ee_product_name}} to -provide a single pane of visibility across all of your services -that are running in Kubernetes and non-Kubernetes environments. - -This [guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise/) goes into details of -the architecture and how one can set that up. - -## Admission Controller - -The {{site.kic_product_name}} also ships with a Validating -Admission Controller that -can be enabled to verify KongConsumer, KongPlugin and Secret -resources as they are created. -Please follow the [admission-webhook](/kubernetes-ingress-controller/{{page.release}}/deployment/admission-webhook/) deployment -guide to set it up. diff --git a/app/kubernetes-ingress-controller/1.2.x/faq.md b/app/kubernetes-ingress-controller/1.2.x/faq.md deleted file mode 100644 index 8f401a8b6e3c..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/faq.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: FAQs ---- - -### Why endpoints and not services? - -The {{site.kic_product_name}} does not use -[Services][k8s-service] to route traffic -to the pods. 
Instead, it uses the Endpoints API -to bypass [kube-proxy][kube-proxy] -to allow Kong features like session affinity and -custom load balancing algorithms. -It also removes overhead -such as conntrack entries for iptables DNAT. - -### Is it possible to create consumers using the Admin API? - -From version 0.5.0 onwards, the {{site.kic_product_name}} tags each entity -that it manages inside Kong's database and only manages the entities that -it creates. -This means that if consumers and credentials are created dynamically, they -won't be deleted by the Ingress Controller. - -[k8s-service]: https://kubernetes.io/docs/concepts/services-networking/service -[kube-proxy]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/cert-manager.md b/app/kubernetes-ingress-controller/1.2.x/guides/cert-manager.md deleted file mode 100644 index 471b4b29189c..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/cert-manager.md +++ /dev/null @@ -1,372 +0,0 @@ ---- -title: Using cert-manager for automated TLS certificate ---- - -This guide will walk through steps to set up the {{site.kic_product_name}} with -cert-manager to automate certificate management using Let's Encrypt. -Any ACME-based CA can be used in-place of Let's Encrypt as well. - -## Before you begin - -You will need the following: - -- Kubernetes cluster that can provision an IP address that is routable from - the Internet. If you don't have one, you can use GKE or any managed k8s - cloud offering. -- A domain name for which you control the DNS records. - This is necessary so that - Let's Encrypt can verify the ownership of the domain and issue a certificate. - In the current guide, we use `example.com`, please replace this with a domain - you control. - -This tutorial was written using Google Kubernetes Engine. - -## Set up the {{site.kic_product_name}} {#set-up-kic} - -Execute the following to install the Ingress Controller: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.example.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -## Set up cert-manager - -Please follow cert-manager's [documentation](https://cert-manager.io/docs/installation/) -on how to install cert-manager onto your cluster. 
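For reference, one common installation path is cert-manager's Helm chart; treat the following as a sketch and defer to the linked documentation for the chart version and flags that match your cluster:

```bash
$ kubectl create namespace cert-manager
$ helm repo add jetstack https://charts.jetstack.io
$ helm repo update

# Helm 3; installCRDs tells the chart to install cert-manager's CRDs as well.
$ helm install cert-manager jetstack/cert-manager \
    --namespace cert-manager \
    --set installCRDs=true
```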
- -Once installed, verify all the components are running using: - -```bash -kubectl get all -n cert-manager -NAME READY STATUS RESTARTS AGE -pod/cert-manager-86478c5ff-mkhb9 1/1 Running 0 23m -pod/cert-manager-cainjector-65dbccb8b6-6dnjl 1/1 Running 0 23m -pod/cert-manager-webhook-78f9d55fdf-5wcnp 1/1 Running 0 23m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/cert-manager-webhook ClusterIP 10.63.240.251 443/TCP 23m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/cert-manager 1 1 1 1 23m -deployment.apps/cert-manager-cainjector 1 1 1 1 23m -deployment.apps/cert-manager-webhook 1 1 1 1 23m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cert-manager-86478c5ff 1 1 1 23m -replicaset.apps/cert-manager-cainjector-65dbccb8b6 1 1 1 23m -replicaset.apps/cert-manager-webhook-78f9d55fdf 1 1 1 23m -``` - -## Set up your application - -Any HTTP-based application can be used, for the purpose of the demo, install -the following echo server: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Set up DNS - -Get the IP address of the load balancer for Kong: - -```bash -$ kubectl get service -n kong kong-proxy -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 35.233.170.67 80:31929/TCP,443:31408/TCP 58d -``` - -To get only the IP address: - -```bash -$ kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy -35.233.170.67 -``` - -Please note that the IP address in your case will be different. - -Next, setup a DNS records to resolve `proxy.example.com` to the -above IP address: - -```bash -$ dig +short proxy.example.com -35.233.170.67 -``` - -Next, setup a CNAME DNS record to resolve `demo.example.com` to -`proxy.example.com`. - -```bash -$ dig +short demo.yolo2.com -proxy.example.com. -35.233.170.67 -``` - -## Expose your application to the Internet - -Setup an Ingress rule to expose the application: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: demo.example.com - http: - paths: - - path: / - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -Access your application: - -```bash -$ curl -I demo.example.com -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 21:14:45 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 -``` - -## Request TLS Certificate from Let's Encrypt - -First, setup a ClusterIssuer for cert-manager - -```bash -$ echo "apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-prod - namespace: cert-manager -spec: - acme: - email: user@example.com #please change this - privateKeySecretRef: - name: letsencrypt-prod - server: https://acme-v02.api.letsencrypt.org/directory - solvers: - - http01: - ingress: - class: kong" | kubectl apply -f - -clusterissuer.cert-manager.io/letsencrypt-prod configured -``` - -*Note*: If you run into issues configuring this, -be sure that the group (`cert-manager.io`) and -version (`v1`) match those in the output of -`kubectl describe crd clusterissuer`. -This directs cert-manager which CA authority to use to issue the certificate. 
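Before moving on, you can check that cert-manager has accepted the issuer and marked it ready (column layout varies slightly between cert-manager releases):

```bash
$ kubectl get clusterissuer letsencrypt-prod
NAME               READY   AGE
letsencrypt-prod   True    45s
```

If `READY` stays `False`, `kubectl describe clusterissuer letsencrypt-prod` shows the ACME registration error in the status conditions.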
- -Next, update your Ingress resource to provision a certificate and then use it: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/tls-acme: "true" - cert-manager.io/cluster-issuer: letsencrypt-prod - kubernetes.io/ingress.class: kong -spec: - tls: - - secretName: demo-example-com - hosts: - - demo.example.com - rules: - - host: demo.example.com - http: - paths: - - path: / - backend: - serviceName: echo - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo-example-com configured -``` - -Things to note here: - -- The annotation `kubernetes.io/tls-acme` is set to `true`, informing - cert-manager that it should provision a certificate for hosts in this - Ingress using ACME protocol. -- `certmanager.k8s.io/cluster-issuer` is set to `letsencrypt-prod`, directing - cert-manager to use Let's Encrypt's production server to provision a TLS - certificate. -- `tls` section of the Ingress directs the {{site.kic_product_name}} to use the - secret `demo-example-com` to encrypt the traffic for `demo.example.com`. - This secret will be created by cert-manager. - -Once you update the Ingress resource, cert-manager will start provisioning -the certificate and in sometime the certificate will be available for use. - -You can track the progress of certificate issuance: - -```bash -$ kubectl describe certificate demo-example-com -Name: demo-example-com -Namespace: default -Labels: -Annotations: -API Version: certmanager.k8s.io/v1 -Kind: Certificate -Metadata: - Creation Timestamp: 2019-06-21T20:41:54Z - Generation: 1 - Owner References: - API Version: extensions/v1beta1 - Block Owner Deletion: true - Controller: true - Kind: Ingress - Name: demo-example-com - UID: 261d15d3-9464-11e9-9965-42010a8a01ad - Resource Version: 19561898 - Self Link: /apis/certmanager.k8s.io/v1/namespaces/default/certificates/demo-example-com - UID: 014d3f1d-9465-11e9-9965-42010a8a01ad -Spec: - Acme: - Config: - Domains: - demo.example.com - Http 01: - Dns Names: - demo.example.com - Issuer Ref: - Kind: ClusterIssuer - Name: letsencrypt-prod - Secret Name: demo-example-com -Status: - Conditions: - Last Transition Time: 2019-06-21T20:42:20Z - Message: Certificate is up to date and has not expired - Reason: Ready - Status: True - Type: Ready - Not After: 2019-09-19T19:42:19Z -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Generated 53m cert-manager Generated new private key - Normal GenerateSelfSigned 53m cert-manager Generated temporary self signed certificate - Normal OrderCreated 53m cert-manager Created Order resource "demo-example-com-3811625818" - Normal OrderComplete 53m cert-manager Order "demo-example-com-3811625818" completed successfully - Normal CertIssued 53m cert-manager Certificate issued successfully -``` - -## Test HTTPS - -Once all is in place, you can use HTTPS: - -```bash -$ curl -v https://demo.example.com -* Rebuilt URL to: https://demo.example.com/ -* Trying 35.233.170.67... 
-* TCP_NODELAY set -* Connected to demo.example.com (35.233.170.67) port 443 (#0) -* ALPN, offering h2 -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/cert.pem - CApath: none -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES256-GCM-SHA384 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=demo.example.com -* start date: Jun 21 19:42:19 2019 GMT -* expire date: Sep 19 19:42:19 2019 GMT -* subjectAltName: host "demo.example.com" matched cert's "demo.example.com" -* issuer: C=US; O=Let's Encrypt; CN=Let's Encrypt Authority X3 -* SSL certificate verify ok. -> GET / HTTP/1.1 -> Host: demo.example.com -> User-Agent: curl/7.54.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Content-Type: text/plain; charset=UTF-8 -< Transfer-Encoding: chunked -< Connection: keep-alive -< Date: Fri, 21 Jun 2019 21:37:43 GMT -< Server: echoserver -< X-Kong-Upstream-Latency: 1 -< X-Kong-Proxy-Latency: 1 -< Via: kong/1.1.2 -< - - -Hostname: echo-d778ffcd8-52ddj - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-9w4t - pod name: echo-d778ffcd8-52ddj - pod namespace: default - pod IP:10.60.2.246 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.2.239 - method=GET - real path=/ - query= - request_version=1.1 - request_scheme=http - request_uri=http://demo.example.com:8080/ - -Request Headers: - accept=*/* - connection=keep-alive - host=demo.example.com - user-agent=curl/7.54.0 - x-forwarded-for=10.138.0.6 - x-forwarded-host=demo.example.com - x-forwarded-port=8443 - x-forwarded-proto=https - x-real-ip=10.138.0.6 - -Request Body: - -no body in request- -``` - -Et voilà ! You've secured your API with HTTPS -with the {{site.kic_product_name}} and cert-manager. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/configure-acl-plugin.md b/app/kubernetes-ingress-controller/1.2.x/guides/configure-acl-plugin.md deleted file mode 100644 index 2ef8aecd4249..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/configure-acl-plugin.md +++ /dev/null @@ -1,755 +0,0 @@ ---- -title: Configuring ACL Plugin ---- - -This guide walks through configuring the Kong ACL Plugin. The ACL Plugin -requires the use of at least one Authentication plugin. This example will use -the JWT Auth Plugin - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. 
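-
-As a convenience, and assuming your installation is exposed through the
-`kong-proxy` LoadBalancer Service created by the deployment manifests, the
-variable can be populated as shown below. Use `.hostname` instead of `.ip` on
-providers that return a DNS name rather than an IP address.
-
-```bash
-# Capture the externally reachable address of the Kong proxy
-$ export PROXY_IP=$(kubectl get service -n kong kong-proxy \
-    -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
-$ echo $PROXY_IP
-```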
- -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create two Ingress rules to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Test the Ingress rules: - -```bash -$ curl -i $PROXY_IP/get -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -``` - -## Add JWT authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. Let's enable JWT authentication - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: app-jwt -plugin: jwt -" | kubectl apply -f - -``` - -Now let's associate the plugin to the Ingress rules we created earlier. 
- -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Any requests matching the proxying rules for `demo-get` and `demo` post will -now require a valid JWT and the consumer for the JWT to be associate with the -right ACL. - -```bash -$ curl -i $PROXY_IP/get - -HTTP/1.1 401 Unauthorized -Date: Mon, 06 Apr 2020 07:27:44 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 50 -X-Kong-Response-Latency: 2 -Server: kong/2.0.2 - - -{"message":"Unauthorized"} - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post - -HTTP/1.1 401 Unauthorized -Date: Mon, 06 Apr 2020 07:27:44 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 50 -X-Kong-Response-Latency: 2 -Server: kong/2.0.2 - - -{"message":"Unauthorized"} -``` - -You should get a 401 response telling you that the request is not authorized. - -## Provision Consumers - -Let's provision 2 KongConsumer resources: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -" | kubectl apply -f - -``` - -## Secrets - -Next, let's provision some Secrets for the KongConsumers to reference. Each -ACL will need its own Secret and each JWT public key will need its own Secret. -The credential type is specified in the `kongCredType` field. In this -case we'll be using `jwt` and `acl`. You can create a secret using any other -method as well. - -The JWT signing algorithm is set in the `algorithm` field. The if using a -public key like this example it is stored in the `rsa_pulic_key` field. If you -are using a secret signing key, use the `secret` field. The last field to set -if you are using `RS256` or `ES256` is the `key` field. This should match the -`iss` field in the JWT you will be sending. You can check this value by -decoding your JWT over at [https://jwt.io](https://jwt.io) - -Since we are using the Secret resource, Kubernetes will encrypt and store the -JWT signing key and ACL group for us. - -### JWT signing key - -```bash -# create secret for jwt public key -$ kubectl create secret \ - generic app-admin-jwt \ - --from-literal=kongCredType=jwt \ - --from-literal=key="admin-issuer" \ - --from-literal=algorithm=RS256 \ - --from-literal=secret="dummy" \ - --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY----- - MIIBIjA.... 
- -----END PUBLIC KEY-----" - -# create a second secret with a different key -$ kubectl create secret \ - generic app-user-jwt \ - --from-literal=kongCredType=jwt \ - --from-literal=key="user-issuer" \ - --from-literal=algorithm=RS256 \ - --from-literal=secret="dummy" \ - --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY----- - qwerlkjqer.... - -----END PUBLIC KEY-----" -``` - -Validation requirements impose that even if the `secret` is not used for algorithm -`RS256` or `ES256` the field `secret` must be present, so put some dummy value for it. - -## Assign the credentials - -In order to for the ACL and JWT to be validated by Kong, the secrets will need -to be referenced by the KongConsumers we created earlier. Let's update those. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt -" | kubectl apply -f - -``` - -## Use the credential - -Now to use a JWT to pass authentication. Let's store the user and admin jwt's -in some environment variables. `USER_JWT` and `ADMIN_JWT`. If you are using -an identity provider, you should be able to login and get out a JWT from their -API. If you are generating your own, go through the process of generating your -own. - -Let's test the get route - -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true 
-X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - - -``` - -## Adding ACL's - -The JWT plugin doesn't provide the ability to authroize a given issuer to a -given ingress. To do this we need to use the ACL plugin. Let's create an admin -ACL config - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: admin-acl -plugin: acl -config: - whitelist: ['app-admin'] -" | kubectl apply -f - -``` - -Then let's create a user ACL config. We want our admin to be able to access -the same resources as the user, so let's make sure we include them in the -whitelist. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: plain-user-acl -plugin: acl -config: - whitelist: ['app-user','app-admin'] -" | kubectl apply -f - -``` - -Next let's create the secrets that will define the ACL groups. - -```bash -# create secrets for acl groups -$ kubectl create secret \ - generic app-admin-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-admin - -$ kubectl create secret \ - generic app-user-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-user -``` - -After we create the secrets, the consumers need to be updated to reference the -ACL credentials - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt - - app-admin-acl -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt - - app-user-acl -" | kubectl apply -f - -``` - -The last thing to configure is the ingress to use the new plguins. Note, if you -set more than one ACL plugin, the last one supplied will be the only one -evaluated. 
- -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt,plain-user-acl - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt,admin-acl - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Now let's test it. - -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-user", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post -HTTP/1.1 403 Forbidden -Date: Mon, 06 Apr 2020 07:11:59 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 45 -X-Kong-Response-Latency: 1 -Server: kong/2.0.2 - -{"message":"You cannot consume this service"} -``` - -The `plain-user` user is not in the `admin-acl` whitelist, and is therefore -unauthorized to access the resource - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 1156 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 07:20:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 4 -X-Kong-Proxy-Latency: 4 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": 
"localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} -``` diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/configuring-custom-entities.md b/app/kubernetes-ingress-controller/1.2.x/guides/configuring-custom-entities.md deleted file mode 100644 index 747286a87864..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/configuring-custom-entities.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Configuring Custom Entities ---- - -This is an **advanced-level** guide for users using custom entities in Kong. -Most users do not need to use this feature. - -Kong has in-built extensibility with its plugin architecture. -Plugins in Kong have a `config` property where users can store configuration -for any custom plugin and this suffices in most use cases. -In some use cases, plugins define custom entities to store additional -configuration outside the plugin instance itself. -This guide elaborates on how such custom entities can be used with the Kong -Ingress Controller. - -> Note: All entities shipped with Kong are supported by the -{{site.kic_product_name}}out of the box. This guide applies only if you have a -custom entity in your plugin. To check if your plugin contains a custom entity, -the source code will usually contain a `daos.lua` file. -Custom plugins have first-class support in the {{site.kic_product_name}} -via the `KongPlugin` CRD. -Please read [the custom plugin guide](/kubernetes-ingress-controller/{{page.release}}/guides/setting-up-custom-plugins/) instead -if you are only using Custom plugins. - -## Caveats - -- The feature discussed in this guide apply for DB-less deployments of Kong. - The feature is not supported for deployments where Kong is used with a - database or Kong is used in hybrid mode. - For these deployments, configure custom entities directly using Kong's Admin - API. -- Custom entities which have a foreign relation with other core entities in Kong - are not supported. Only entities which can exist by themselves and then - be referenced via plugin configuration are supported. - -## Creating a JSON representation of the custom entity - -In this section, we will learn how to create a JSON representation of -a custom entity. 
- -Suppose you have a custom entity with the following schema in your plugin source: - -```lua -{ - name = "xkcds", - primary_key = { "id" }, - cache_key = { "name" }, - endpoint_key = "name", - fields = { - { id = typedefs.uuid }, - { - name = { - type= "string", - required = true, - unique = true, - }, - }, - { - url = { - type = "string", - required = true, - }, - }, - { created_at = typedefs.auto_timestamp_s }, - { updated_at = typedefs.auto_timestamp_s }, - }, -} -``` - -An instance of such an entity would look like: - -```json -{ - "id": "385def6e-3059-4929-bb12-d205e97284c5", - "name": "Bobby Drop Tables", - "url": "https://xkcd.com/327/" -} -``` - -Multiple instances of such an entity are represented as follows: - -```json -{ - "xkcds": [ - { - "id": "385def6e-3059-4929-bb12-d205e97284c5", - "name": "bobby_tables", - "url": "https://xkcd.com/327/" - }, - { - "id": "d079a632-ac8d-4a9a-860c-71de82e8fc11", - "name": "compiling", - "url": "https://xkcd.com/303/" - } - ] -} -``` - -If you have more than one custom entities that you would like to configure -then you can create other entities by specifying the entity name at the root -level of the JSON as the key and then a JSON array containing the -custom entities as the value of the key. - -To configure custom entities in a DB-less instance of Kong, -you first need to create such a JSON representation of your entities. - -## Configuring the custom entity secret - -Once you have the JSON representation, we need to store the configuration -inside a Kubernetes Secret. -The following command assumes the filename to be `entities.json` but you can -use any other filename as well: - -```bash -$ kubectl create secret generic -n kong kong-custom-entities --from-file=config=entities.json -secret/kong-custom-entities created -``` - -Some things to note: -- The key inside the secret must be `config`. This is not configurable at the - moment. -- The secret must be accessible by the Ingress Controller. The recommended - practice here is to install the secret in the same namespace in which Kong - is running. - -## Configure the Ingress Controller - -Once you have the secret containing the custom entities configured, -you need to instruct the controller to read the secret and sync the custom -entities to Kong. - -To do this, you need to add the following environment variable to the -`ingress-ccontroller` container: - -```yaml -env: -- name: CONTROLLER_KONG_CUSTOM_ENTITIES_SECRET - value: kong/kong-custom-entities -``` - -This value of the environment variable takes the form of `/`. -You need to configure this only once. - -This instructs the controller to watch the above secret and configure Kong -with any custom entities present inside the secret. -If you change the configuration and update the secret with different entities, -the controller will dynamically fetch the updated secret and configure Kong. - -## Verification - -You can verify that the custom entity was actually created in Kong's memory -using the `GET /xkcds` (endpoint will differ based on the name of the entity) -on Kong's Admin API. 
-You can forward traffic from your local machine to the Kong Pod to access it: - -```bash -$ kubectl port-forward -n kong KONG-POD-NAME 8444:8444 -``` - -and in a separate terminal: - -```bash - $ curl -k https://localhost:8444/ -``` - -## Using the custom entity - -You can now use reference the custom entity in any of your custom plugin's -`config` object: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: random-xkcd-header -config: - xkcds: - - d079a632-ac8d-4a9a-860c-71de82e8fc11 -plugin: xkcd-header -``` diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/configuring-fallback-service.md b/app/kubernetes-ingress-controller/1.2.x/guides/configuring-fallback-service.md deleted file mode 100644 index 46d238e4b6ad..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/configuring-fallback-service.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Configuring a fallback service ---- - -This guide walks through how to setup a fallback service using Ingress -resource. The fallback service will receive all requests that don't -match against any of the defined Ingress rules. -This can be useful for scenarios where you would like to return a 404 page -to the end user if the user clicks on a dead link or inputs an incorrect URL. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup a simple HTTP service in the -cluster and proxy it. - -```bash -$ echo ' -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fallback-svc -spec: - replicas: 1 - selector: - matchLabels: - app: fallback-svc - template: - metadata: - labels: - app: fallback-svc - spec: - containers: - - name: fallback-svc - image: hashicorp/http-echo - args: - - "-text" - - "This is not the path you are looking for. 
- Fallback service" - ports: - - containerPort: 5678 ---- -apiVersion: v1 -kind: Service -metadata: - name: fallback-svc - labels: - app: fallback-svc -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: 5678 - protocol: TCP - name: http - selector: - app: fallback-svc -' | kubectl apply -f - -``` - -Result: - -```bash -deployment.apps/fallback-svc created -service/fallback-svc created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup a fallback service - -Let's deploy another sample service service: - -```bash -$ kubectl apply -f https://bit.ly/fallback-svc -deployment.extensions/fallback-svc created -service/fallback-svc created -``` - -Next, let's set up an Ingress rule to make it the fallback service -to send all requests to it that don't match any of our Ingress rules: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: fallback - annotations: - kubernetes.io/ingress.class: kong -spec: - backend: - serviceName: fallback-svc - servicePort: 80 -" | kubectl apply -f - -``` - -## Test it - -Now send a request with a request property that doesn't match against -any of the defined rules: - -```bash -$ curl $PROXY_IP/random-path -This is not the path you are looking for. - Fallback service -``` - -The above message comes from the fallback service that was deployed in the -last step. - -Create more Ingress rules, some complicated regex based ones and -see how requests that don't match any rules, are forwarded to the -fallback service. - -You can also use Kong's request-termination plugin on the `fallback` -Ingress resource to terminate all requests at Kong, without -forwarding them inside your infrastructure. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/configuring-health-checks.md b/app/kubernetes-ingress-controller/1.2.x/guides/configuring-health-checks.md deleted file mode 100644 index 875525104609..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/configuring-health-checks.md +++ /dev/null @@ -1,353 +0,0 @@ ---- -title: Setting up Active and Passive health checks ---- - -In this guide, we will go through steps necessary to setup active and passive -health checking using the {{site.kic_product_name}}. This configuration allows -Kong to automatically short-circuit requests to specific Pods that are -mis-behaving in your Kubernetes Cluster. - -> Please make sure to use {{site.kic_product_name}} >= 0.6 as the previous -versions contain a [bug](https://github.com/hbagdi/go-kong/issues/6). - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. 
- -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy any requests yet. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Observe the headers and you can see that Kong has proxied the request correctly. - -## Setup passive health checking - -Now, let's setup passive HTTP health-check for our service. -All health-checking is done at Service-level and not Ingress-level. - -Add the following KongIngress resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking created -``` - -Here, we are configuring Kong to short-circuit requests to a pod -if a pod throws 3 consecutive errors. - -Next, associate the KongIngress resource with `httpbin` service: - -```bash -$ kubectl patch svc httpbin -p '{"metadata":{"annotations":{"konghq.com/override":"demo-health-checking"}}}' -service/httpbin patched -``` - -Now, let's send some traffic to test if this works: - -Let's send 2 requests that represent a failure from upstream -and then send a request for 200. -Here we are using `/status/500` to simulate a failure from upstream. 
- -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Kong has not short-circuited because there were only two failures. -Let's send 3 requests and open the circuit, and then send a normal request. - -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 22:41:19 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} - -``` - -As we can see, Kong returns back a 503, representing that the service is -unavailable. Since we have only one pod of httpbin running in our cluster, -and that is throwing errors, Kong will not proxy anymore requests. - -Now we have a few options: - -- Delete the current httpbin pod; Kong will then proxy requests to the new - pod that comes in its place. -- Scale the httpbin deployment; Kong will then proxy requests to the new - pods and leave the short-circuited pod out of the loop. -- Manually change the pod health status in Kong using Kong's Admin API. - -These options highlight the fact that once a circuit is opened because of -errors, there is no way for Kong to close the circuit again. - -This is a feature which some services might need, where once a pod starts -throwing errors, manual intervention is necessary before that pod can -again handle requests. 
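-
-For the third option, the sketch below uses Kong's Admin API. It assumes you
-port-forward the Admin API as in the other guides; the upstream and target
-names are placeholders that you need to substitute with values from your own
-cluster.
-
-```bash
-# Forward the Admin API from the Kong pod (8444, as used elsewhere in these guides)
-$ kubectl port-forward -n kong KONG-POD-NAME 8444:8444
-
-# In another terminal, find the upstream and the target for the failing pod
-$ curl -sk https://localhost:8444/upstreams
-$ curl -sk https://localhost:8444/upstreams/UPSTREAM-NAME/targets
-
-# Mark that target healthy again so Kong resumes proxying to it
-$ curl -sk -X POST https://localhost:8444/upstreams/UPSTREAM-NAME/targets/TARGET-ID/healthy
-```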
-To get around this, we can introduce active health-check, where each instance -of Kong actively probes pods to figure out if they are healthy or not. - -## Setup active health checking - -Let's update our KongIngress resource to use active health-checks: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - active: - healthy: - interval: 5 - successes: 3 - http_path: /status/200 - type: http - unhealthy: - http_failures: 1 - interval: 5 - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking configured -``` - -Here, we are configuring Kong to actively probe `/status/200` every 5 seconds. -If a pod is unhealthy (from Kong's perspective), -3 successful probes will change the status of the pod to healthy and Kong -will again start to forward requests to that pod. - -Now, the requests should flow once again: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Let's trip the circuit again by sending three requests that will return -500s from httpbin: - -```bash -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -``` - -Now, sending the following request will fail for about 15 seconds, -the duration it will take active healthchecks to re-classify -the httpbin pod as healthy again. - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 23:17:47 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} -``` - -After 15 seconds, you will see: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As we can see, active health-checks automatically marked a pod as healthy -when passive health-checks marked it unhealthy. - -## Bonus - -Scale the `httpbin` and `ingress-kong` deployments and observe how -multiple pods change the outcome of the above demo. - -Read more about health-checks and ciruit breaker in Kong's -[documentation](/gateway/latest/reference/health-checks-circuit-breakers). diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/configuring-https-redirect.md b/app/kubernetes-ingress-controller/1.2.x/guides/configuring-https-redirect.md deleted file mode 100644 index b54fb8e41a2f..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/configuring-https-redirect.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Configuring https redirect ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -redirect HTTP request to HTTPS so that all communication -from the external world to your APIs and microservices is encrypted. 
- -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup HTTPS redirect - -To instruct Kong to redirect all HTTP requests matching this Ingress rule to -HTTPS, update its annotations to limit its protocols to HTTPS only and -issue a 301 redirect: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"https","konghq.com/https-redirect-status-code":"301"}}}' -ingress.extensions/demo patched -``` - -## Test it - -Now, making a plain-text HTTP request to Kong will result in a redirect -being issued from Kong: - -```bash -$ curl $PROXY_IP/foo/headers -I -HTTP/1.1 301 Moved Permanently -Date: Tue, 06 Aug 2019 18:04:38 GMT -Content-Type: text/html -Content-Length: 167 -Connection: keep-alive -Location: https://35.197.125.63/foo/headers -Server: kong/1.2.1 -``` - -The `Location` header will contain the URL you need to use for an HTTPS -request. Please note that this URL will be different depending on your -installation method. You can also grab the IP address of the load balancer -fronting Kong and send a HTTPS request to test it. - -Let's test it: - -```bash -$ curl -k https://35.197.125.63/foo/headers -{ - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Host": "35.197.125.63", - "User-Agent": "curl/7.54.0", - "X-Forwarded-Host": "35.197.125.63" - } -} -``` - -We can see that Kong correctly serves the request only on HTTPS protocol -and redirects the user if plaint-text HTTP protocol is used. 
-We had to use `-k` flag in cURL to skip certificate validation as the -certificate served by Kong is a self-signed one. -If you are serving this traffic via a domain that you control and have -configured TLS properties for it, then the flag won't -be necessary. - -If you have a domain that you control but don't have TLS/SSL certificates -for it, please check out out -[Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager) guide which can get TLS -certificates setup for you automatically. And it's free, thanks to -Let's Encrypt! diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/getting-started-istio.md b/app/kubernetes-ingress-controller/1.2.x/guides/getting-started-istio.md deleted file mode 100644 index 19ef1a26341d..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/getting-started-istio.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: Running the Kong Ingress Controller with Istio ---- - -In this guide, you will: -* Install Istio v1.6.7 and Kong in your cluster. -* Deploy an example Istio-enabled application (_bookinfo_). -* Deploy an `Ingress` customized with a `KongPlugin` for the example application. -* Make several requests to the sample application via Kong and Istio. -* See the performance metrics of the sample application, provided by Istio. - -### Prerequisites -For this guide, you will need: - -* A Kubernetes v1.15 (or newer) cluster which can pull container images from public registries. For example, you can use: - * A managed Kubernetes cluster (AWS EKS, Google Cloud GKE, Azure AKS). - * Minikube. - * `microk8s` with the `dns` addon enabled. -* `kubectl` with admin access to the cluster. - -### Download Istio - -Download the Istio bundle at version 1.6.7: - -```console -$ curl -L https://istio.io/downloadIstio | env ISTIO_VERSION=1.6.7 sh - -... -... -Istio 1.6.7 Download Complete! - -Istio has been successfully downloaded into the istio-1.6.7 folder on your system. -... -... -``` - -### Install Istio Operator - -Invoke `istioctl` to deploy the Istio Operator to the Kubernetes cluster: - -```console -$ ./istio-1.6.7/bin/istioctl operator init -Using operator Deployment image: docker.io/istio/operator:1.6.7 -✔ Istio operator installed -✔ Installation complete -``` - -### Deploy Istio using Operator - -Deploy Istio using Istio Operator: - -```console -$ kubectl create namespace istio-system -namespace/istio-system created -``` -```console -$ kubectl apply -f - < 8000 -Forwarding from [::1]:8080 -> 8000 -... -``` - -Navigate your web browser to `http://localhost:8080/` You should be able to see a bookstore web application. Click -through any available links several times. As you hit 30 requests per minute (for example, by holding down the "Refresh" -key combination, e.g. `` or ``), you should obtain a `Kong Error - API rate limit exceeded` response. - -### See the connection graph in Kiali - -Connect to Kiali (the Istio dashboard): - -```console -$ # Keep the command below running in the background -$ kubectl port-forward service/kiali 20001:20001 -n istio-system -Forwarding from 127.0.0.1:20001 -> 20001 -Forwarding from [::1]:20001 -> 20001 -... -``` - -* Navigate your web browser to `http://localhost:20001/`. -* Log in using the default credentials (`admin`/`admin`). -* Choose _Workloads_ from the menu on the left. -* Select `my-istio-app` in the _Namespace_ drop-down menu. -* Click the _productpage-v1_ service name. 
-* Click the three dots button in the top-right corner of _Graph Overview_ and click _Show full graph_. -* Select `kong-istio` alongside `my-istio-app` in the _Namespace_ diagram. -* Observe a connection graph spanning from `example-kong-kong-proxy` through `productpage-v1` to the other sample -application services such as `ratings-v1` and `details-v1`. - -### See the metrics in Grafana - -Connect to Grafana (a dashboard frontend for Prometheus which has been deployed with Istio): - -```console -$ # Keep the command below running in the background -$ kubectl port-forward service/grafana 3000:3000 -n istio-system -Forwarding from 127.0.0.1:3000 -> 3000 -Forwarding from [::1]:3000 -> 3000 -... -``` - -* Navigate your web browser to `http://localhost:3000/`. -* Expand the dashboard selection drop-down menu from the top of the screen. Expand the `istio` directory and choose the -_Istio Workload Dashboard_ from the list. -* Choose _Namespace: my-istio-app_ and _Workload: productpage-v1_ from the drop-downs. -* Choose a timespan in the top-right of the page to include the time when you made requests to the sample application (e.g. _Last 1 hour_). -* Observe the incoming and outgoing request graphs reflecting actual requests from Kong to `productpage-v1`, and from `productpage-v1` to its backends. - -Note that the requests from the web browser to Kong are not reflected in inbound stats of `example-kong-kong-proxy` -because we've issued these requests by `kubectl port-forward`, thus bypassing the Istio proxy sidecar in Kong. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/getting-started.md b/app/kubernetes-ingress-controller/1.2.x/guides/getting-started.md deleted file mode 100644 index 2de9ca9d32fc..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/getting-started.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Getting started with the Kong Ingress Controller ---- - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return back -a HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.1.2 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy the request yet. - -## Set up an echo-server - -Setup an echo-server application to demonstrate how -to use the {{site.kic_product_name}}: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -This application just returns information about the -pod and details from the HTTP request. 
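-
-Optionally, before creating any Ingress rules, confirm that the Deployment and
-Service created above are ready. The resource names come from the output of
-the previous command.
-
-```bash
-$ kubectl get deployment echo
-$ kubectl get service echo
-$ kubectl rollout status deployment/echo
-```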
- -## Basic proxy - -Create an Ingress rule to proxy the echo-server created previously: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Fri, 21 Jun 2019 17:12:49 GMT -Server: echoserver -X-Kong-Upstream-Latency: 0 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 - - - -Hostname: echo-758859bbfb-txt52 - -Pod Information: - node name: minikube - pod name: echo-758859bbfb-txt52 - pod namespace: default - pod IP: 172.17.0.14 -<-- clipped --> -``` - -If everything is deployed correctly, you should see the above response. -This verifies that Kong can correctly route traffic to an application running -inside Kubernetes. - -## Using plugins in Kong - -Setup a KongPlugin resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -kongplugin.configuration.konghq.com/request-id created -``` - -Create a new Ingress resource which uses this plugin: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - konghq.com/plugins: request-id - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -The above resource directs Kong to execute the request-id plugin whenever -a request is proxied matching any rule defined in the resource. - -Send a request to Kong: - -```bash -$ curl -i -H "Host: example.com" $PROXY_IP/bar/sample -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:09:02 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 - - - -Hostname: echo-758859bbfb-cnfmx - -Pod Information: - node name: minikube - pod name: echo-758859bbfb-cnfmx - pod namespace: default - pod IP: 172.17.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=172.17.0.2 - method=GET - real path=/bar/sample - query= - request_version=1.1 - request_scheme=http - request_uri=http://example.com:8080/bar/sample - -Request Headers: - accept=*/* - connection=keep-alive - host=example.com - my-request-id=7250803a-a85a-48da-94be-1aa342ca276f#6 - user-agent=curl/7.54.0 - x-forwarded-for=172.17.0.1 - x-forwarded-host=example.com - x-forwarded-port=8000 - x-forwarded-proto=http - x-real-ip=172.17.0.1 - -Request Body: - -no body in request- -``` - -The `my-request-id` can be seen in the request received by echo-server. -It is injected by Kong as the request matches one -of the Ingress rules defined in `demo-example-com` resource. - -## Using plugins on Services - -Kong Ingress allows plugins to be executed on a service level, meaning -Kong will execute a plugin whenever a request is sent to a specific k8s service, -no matter which Ingress path it came from. 
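-
-The steps below attach the plugin with `kubectl patch`. Once the `rl-by-ip`
-KongPlugin from the next step exists, an equivalent shortcut is
-`kubectl annotate`; this is only a sketch of an alternative, not an additional
-required step.
-
-```bash
-# Equivalent to the `kubectl patch` command used later in this section
-$ kubectl annotate service echo konghq.com/plugins=rl-by-ip --overwrite
-```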
- -Create a KongPlugin resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: rl-by-ip -config: - minute: 5 - limit_by: ip - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongplugin.configuration.konghq.com/rl-by-ip created -``` - -Next, apply the `konghq.com/plugins` annotation on the Kubernetes Service -that needs rate-limiting: - -```bash -kubectl patch svc echo \ - -p '{"metadata":{"annotations":{"konghq.com/plugins": "rl-by-ip\n"}}}' -``` - -Now, any request sent to this service will be protected by a rate-limit -enforced by Kong: - -```bash -$ curl -I $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:25:49 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 2 -X-Kong-Upstream-Latency: 0 -X-Kong-Proxy-Latency: 4 -Via: kong/1.1.2 - -$ curl -I -H "Host: example.com" $PROXY_IP/bar/sample -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:28:30 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 2 -Via: kong/1.1.2 -``` - -## Result - -This guide sets up the following configuration: - -```text -HTTP requests with /foo -> Kong enforces rate-limit -> echo server - -HTTP requests with /bar -> Kong enforces rate-limit + -> echo-server - on example.com injects my-request-id header -``` diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/overview.md b/app/kubernetes-ingress-controller/1.2.x/guides/overview.md deleted file mode 100644 index ca89c71096df..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/overview.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Guides ---- - -Follow one of the guides to learn more about how to use -the {{site.kic_product_name}}: - -- [Getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started/) with the {{site.kic_product_name}} -- [Getting started using Istio](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started-istio/) with the {{site.kic_product_name}} and Istio -- [Using KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) - This guide walks through setting up plugins in Kong using a declarative - approach. -- [Using KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource/) - This guide explains how the KongIngress resource can be used to change Kong - specific settings like load-balancing, health-checking and proxy behaviour. -- [Using KongConsumer and Credential Resources](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource/) - This guide walks through how Kubernetes native declarative configuration - can be used to dynamically provision credentials for authentication purposes - in the Ingress layer. -- [Using JWT and ACL KongPlugin resources](/kubernetes-ingress-controller/{{page.release}}/guides/configure-acl-plugin/) - This guides walks you through configuring the JWT plugin and ACL plugin for - authentication purposes at the Ingress layer -- [Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager/) - This guide walks through how to use cert-manager along with Kong Ingress - Controller to automate TLS certificate provisioning and using them - to encrypt your API traffic. 
-- [Configuring a fallback service](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-fallback-service/)
-  This guide walks through how to set up a fallback service using the Ingress
-  resource. The fallback service will receive all requests that don't
-  match against any of the defined Ingress rules.
-- [Using external service](/kubernetes-ingress-controller/{{page.release}}/guides/using-external-service/)
-  This guide shows how to expose services running outside Kubernetes via Kong,
-  using [External Name](https://kubernetes.io/docs/concepts/services-networking/service/#externalname)
-  Services in Kubernetes.
-- [Configuring HTTPS redirects for your services](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-https-redirect/)
-  This guide walks through how to configure the {{site.kic_product_name}} to
-  redirect HTTP requests to HTTPS so that all communication
-  from the external world to your APIs and microservices is encrypted.
-- [Using Redis for rate-limiting](/kubernetes-ingress-controller/{{page.release}}/guides/redis-rate-limiting/)
-  This guide walks through how to use Redis for storing rate-limit information
-  in a multi-node Kong deployment.
-- [Integrate the {{site.kic_product_name}} with Prometheus/Grafana](/kubernetes-ingress-controller/{{page.release}}/guides/prometheus-grafana/)
-  This guide walks through the steps to deploy the {{site.kic_product_name}}
-  and Prometheus to obtain metrics for the traffic flowing into your
-  Kubernetes cluster.
-- [Configuring circuit-breaker and health-checking](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-health-checks/)
-  This guide walks through how to use the circuit-breaker and health-checking
-  features of the {{site.kic_product_name}}.
-- [Setting up a custom plugin](/kubernetes-ingress-controller/{{page.release}}/guides/setting-up-custom-plugins/)
-  This guide walks through installing a custom plugin in Kong using
-  ConfigMaps and Volumes.
-- [Using ingress with gRPC](/kubernetes-ingress-controller/{{page.release}}/guides/using-ingress-with-grpc/)
-  This guide walks through how to use the {{site.kic_product_name}} with gRPC.
-- [Setting up upstream mTLS](/kubernetes-ingress-controller/{{page.release}}/guides/upstream-mtls/)
-  This guide gives an overview of how to set up mutual TLS authentication
-  between Kong and your upstream server.
-- [Preserving Client IP address](/kubernetes-ingress-controller/{{page.release}}/guides/preserve-client-ip/)
-  This guide gives an overview of different methods to preserve the client
-  IP address.
-- [Using KongClusterPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongclusterplugin-resource/)
-  This guide walks through setting up plugins that can be shared across
-  Kubernetes namespaces.
-- [Using Kong with Knative](/kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/)
-  This guide gives an overview of how to set up Kong as the Ingress point
-  for Knative workloads.
-- [Exposing TCP-based service](/kubernetes-ingress-controller/{{page.release}}/guides/using-tcpingress/)
-  This guide gives an overview of how to use the TCPIngress resource to expose
-  non-HTTP based services outside a Kubernetes cluster.
-- [Using mtls-auth plugin](/kubernetes-ingress-controller/{{page.release}}/guides/using-mtls-auth-plugin/)
-  This guide gives an overview of how to use the `mtls-auth` plugin and CA
-  certificates to authenticate requests using client certificates.
-- [Using OpenID-connect plugin](/kubernetes-ingress-controller/{{page.release}}/guides/using-oidc-plugin/)
-  This guide walks through the steps necessary to set up OIDC authentication.
diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/preserve-client-ip.md b/app/kubernetes-ingress-controller/1.2.x/guides/preserve-client-ip.md
deleted file mode 100644
index 0a76e545bbf5..000000000000
--- a/app/kubernetes-ingress-controller/1.2.x/guides/preserve-client-ip.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: Preserving Client IP Address
----
-
-Kong is usually deployed behind a Load Balancer (using a
-Kubernetes Service of type `LoadBalancer`). This can result in the loss of
-the actual client IP address, with Kong observing the IP address of the
-Load Balancer as the client IP address instead. This guide lays
-out different methods of solving this problem.
-
-Preserving the client IP address in the cloud behind Load Balancers requires
-configuration that is specific to your use case, cloud provider,
-and other architecture details.
-[This document](https://kubernetes.io/docs/tutorials/services/source-ip/)
-provides details on how networking works inside Kubernetes and explains
-in detail how the various methods described later in this document work.
-It is recommended that you give it a read.
-
-The following methods can be used to preserve the client IP address:
-
-## ExternalTrafficPolicy: Local
-
-As explained in the
-[Kubernetes docs](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip),
-setting `service.spec.externalTrafficPolicy` to `Local` preserves the client
-IP address. You don't need to change any configuration in Kong if you
-are using this method to preserve the client IP address.
-
-Please note that this is not supported by all public cloud providers.
-
-## Proxy Protocol
-
-If you have an L4 Load Balancer that supports Proxy Protocol, and you're
-terminating TCP connections at the Load Balancer before passing traffic
-onward to Kong, then you can configure Kong to pick up the client IP
-address via this protocol.
-
-Once you have configured the Load Balancer to use Proxy Protocol, you
-need to set the following environment variables in Kong for Kong to
-receive the client IP from the Proxy Protocol header.
-
-- [`KONG_TRUSTED_IPS`](/gateway/latest/reference/configuration/#trusted_ips)
-- [`KONG_PROXY_LISTEN`](/gateway/latest/reference/configuration/#proxy_listen)
-- [`KONG_REAL_IP_HEADER`](/gateway/latest/reference/configuration/#real_ip_header)
-
-For example:
-
-```
-KONG_TRUSTED_IPS=0.0.0.0/0,::/0 # This trusts all IPs
-KONG_PROXY_LISTEN="0.0.0.0:8000 proxy_protocol, 0.0.0.0:8443 ssl proxy_protocol"
-KONG_REAL_IP_HEADER=proxy_protocol
-```
-
-## HTTP headers
-
-If you are using an L7 Load Balancer, i.e. HTTP requests are being terminated
-at the Load Balancer, then you need to use the `x-forwarded-for` or `x-real-ip`
-header to preserve details of the connection between the Client and Load Balancer.
- -You should configure the Load Balancer to inject these headers, and then -you need to set the following environment variables in Kong for Kong to pick up -the Client IP address from HTTP headers: - -- [`KONG_TRUSTED_IPS`](/gateway/latest/reference/configuration/#trusted_ips) -- [`KONG_REAL_IP_HEADER`](/gateway/latest/reference/configuration/#real_ip_header) -- Optional [`KONG_REAL_IP_RECURSIVE`](/gateway/latest/reference/configuration/#real_ip_recursive) - -Please note that if you are using an L7 Load Balancer with Kong, -you cannot use the `certificates` feature in Kong as the TLS session is -already established between the Client and Load Balancer. - -## Cloud-provider specific details - -For the major public clouds, follow are some additional -details that can help you preserve the client IP address: - -### GKE - -You can use `ExternalTrafficPolicy: Local` to preserve the Client IP address. - -### AKS - -You can use `ExternalTrafficPolicy: Local` to preserve the Client IP address. - -### EKS - -You have two options: - -- L4 Load Balancer - In this case, you need to use the Proxy Protocol method to preserve Client IP - address. -- L7 Load Balancer - In this case, you need to use the HTTP headers method to preserve the Client - IP address. - -The recommend Load Balancer type for AWS is NLB. -You can choose the type of Load Balancer using the following annotation: - -``` -service.beta.kubernetes.io/aws-load-balancer-type: nlb -``` - -Other useful annotations for AWS are: - -``` -service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp -service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*' -``` - -A complete list can be found -[here](https://gist.github.com/mgoodness/1a2926f3b02d8e8149c224d25cc57dc1). diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/prometheus-grafana.md b/app/kubernetes-ingress-controller/1.2.x/guides/prometheus-grafana.md deleted file mode 100644 index bf6d82958273..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/prometheus-grafana.md +++ /dev/null @@ -1,338 +0,0 @@ ---- -title: Integrate the Kong Ingress Controller with Prometheus/Grafana ---- - -The {{site.kic_product_name}} can give you visibility not only into how Kong is -performing but also gives visibilty into -how the services in your Kubernetes cluster are responding to the -inbound traffic. - -This how-to guide walks through the steps of how to configure Kong and -Prometheus to collect metrics from the {{site.kic_product_name}}. - -> Note: This guide was originally posted on Kong Inc's blog: -[https://konghq.com/blog/observability-kubernetes-kong/](https://konghq.com/blog/observability-kubernetes-kong) - -## Prerequisites - -You’ll need a few things before we can start: - -- **Kubernetes cluster**: You can use Minikube or a GKE cluster for the - purpose of this tutorial. We are running a GKE Kubernetes cluster v1.12.x. -- **Helm**: We will be using [Helm](https://helm.sh/) - to install all of our components. - Tiller should be installed on your k8s cluster and - Helm CLI should be available on your workstation. - You can follow Helm’s quickstart guide to set up helm. - -Once you have Kubernetes and Helm set up, please proceed. - -Caution: Settings here are tweaked to keep this guide simple. -These settings are not meant for production usage. - -## Install Prometheus and Grafana - -If you already have Prometheus and Grafana installed on your Kubernetes cluster, -you can skip these steps. 
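-
-A quick way to check whether a monitoring stack is already in place is to look
-for Helm releases and pods in the namespace you expect it to live in. A hedged
-sketch, assuming Helm-based installs and the `monitoring` namespace used in the
-rest of this guide:
-
-```bash
-$ helm list --namespace monitoring
-$ kubectl get pods --namespace monitoring
-```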
- -### Prometheus - -First, we will install Prometheus with a -scrape interval of 10 seconds to have fine-grained data points for all metrics. -We’ll install both Prometheus and Grafana in a dedicated `monitoring` namespace. - -To install Prometheus, execute the following: - -```bash -$ kubectl create namespace monitoring -$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -$ helm install prometheus prometheus-community/prometheus --namespace monitoring --values https://bit.ly/2RgzDtg --version 11.0.3 -``` - -### Grafana - -Grafana is installed with the following values for its Helm chart -(see comments for explanation): - -```yaml -persistence: - enabled: true # enable persistence using Persistent Volumes -datasources: - datasources.yaml: - apiVersion: 1 - Datasources: # configure Grafana to read metrics from Prometheus - - name: Prometheus - type: prometheus - url: http://prometheus-server # Since Prometheus is deployed in - access: proxy # same namespace, this resolves - # to the Prometheus Server we installed previous - isDefault: true # The default data source is Prometheus - -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'default' # Configure a dashboard provider file to - orgId: 1 # put Kong dashboard into. - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/default -dashboards: - default: - kong-dash: - gnetId: 7424 # Install the following Grafana dashboard in the - revision: 5 # instance: https://grafana.com/dashboards/7424 - datasource: Prometheus -``` - -To install Grafana, execute the following: - -```bash -$ helm repo add grafana https://grafana.github.io/helm-charts -$ helm install grafana grafana/grafana --namespace monitoring --values http://bit.ly/2FuFVfV --version 5.0.8 -``` - -## Install Kong - -We will use Kong's Helm chart to install Kong -but you can also use plain manifests for this purpose. - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -$ kubectl create namespace kong -$ helm install mykong kong/kong --namespace kong --values https://bit.ly/2UAv0ZE -``` - -### Enable Prometheus plugin in Kong - -We will enable the Promtheus plugin in Kong at the global level, meaning -each request that flows into the Kubernetes cluster gets tracked in Prometheus: - -```bash -$ echo 'apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: prometheus - annotations: - kubernetes.io/ingress.class: kong - labels: - global: "true" -plugin: prometheus -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/prometheus created -``` - -## Set Up Port Forwards - -Now, we will gain access to the components we just deployed. -In a production environment, you would have a Kubernetes Service with -an external IP or load balancer, which would allow you to access -Prometheus, Grafana, and Kong. -For demo purposes, we will set up port-forwarding using kubectl to get access. -It is not advisable to do this in production. 
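-
-If you need stable access outside of a demo, one option is to publish Grafana
-through Kong itself with an Ingress instead of `kubectl port-forward`. The
-following is only a sketch: it assumes the Grafana Service created by the Helm
-release above is named `grafana`, listens on port 80, and that a hostname such
-as `grafana.example.com` points at the Kong proxy (verify the Service details
-with `kubectl get svc -n monitoring` first). For this guide, the port-forwards
-below are all you need.
-
-```bash
-$ echo '
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
-  name: grafana
-  namespace: monitoring
-  annotations:
-    kubernetes.io/ingress.class: kong
-spec:
-  rules:
-  - host: grafana.example.com
-    http:
-      paths:
-      - path: /
-        backend:
-          serviceName: grafana
-          servicePort: 80
-' | kubectl apply -f -
-```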
- -Open a new terminal and execute the following commands: - -```bash -POD_NAME=$(kubectl get pods --namespace monitoring -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace monitoring port-forward $POD_NAME 9090 & - -# You can access Prometheus in your browser at localhost:9090 - -POD_NAME=$(kubectl get pods --namespace monitoring -l "app.kubernetes.io/instance=grafana" -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace monitoring port-forward $POD_NAME 3000 & - -# You can access Grafana in your browser at localhost:3000 - -POD_NAME=$(kubectl get pods --namespace kong -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace kong port-forward $POD_NAME 8000 & - -# Kong proxy port is now your localhost 8000 port -# We are using plain-text HTTP proxy for this purpose of -# demo. -# You can also use the LoadBalancer IP address and not set up this -# port-forwarding if you are running in a cloud environment. -``` - -## Access Grafana Dashboard - -To access Grafana, you need to get the password for the admin user. - -Execute the following to read the password and take note of it: - -```bash -kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo -``` - -Now, browse to [http://localhost:3000](http://localhost:3000) and -fill in username as “admin” and password as what you just noted above. -You should be logged in to Grafana and Kong’s Grafana Dashboard -should already be installed for you. - -## Setup Services - -We have all the components for monitoring installed, -we will now spin up some services for demo purposes and setup Ingress -routing for them. - -### Install Services - -We will set up three services: billing, invoice, and comments. -Execute the following to spin these services up: - -```bash -kubectl apply -f https://gist.githubusercontent.com/hbagdi/2d8ef66fe22cb99e1514f410f992268d/raw/a03d789b70c46ccd0b99d9f1ed838dc21419fc33/multiple-services.yaml -``` - -### Install Ingress for the Services - -Next, once the services are up and running, we will create Ingress -routing rules in Kubernetes. -This will configure Kong to proxy traffic destined for these services correctly. - -Execute the following: - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: sample-ingresses - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /billing - backend: - serviceName: billing - servicePort: 80 - - path: /comments - backend: - serviceName: comments - servicePort: 80 - - path: /invoice - backend: - serviceName: invoice - servicePort: 80 -' | kubectl apply -f - -``` - -## Let’s Create Some Traffic - -We’re done configuring our services and proxies. -Time to see if our setup works. -Execute the following in a new terminal: - -```bash -while true; -do - curl http://localhost:8000/billing/status/200 - curl http://localhost:8000/billing/status/501 - curl http://localhost:8000/invoice/status/201 - curl http://localhost:8000/invoice/status/404 - curl http://localhost:8000/comments/status/200 - curl http://localhost:8000/comments/status/200 - sleep 0.01 -done -``` - -Since we have already enabled Prometheus plugin in Kong to -collect metrics for requests proxied via Kong, -we should see metrics coming through in the Grafana dashboard. - -You should be able to see metrics related to the traffic flowing -through our services. 
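-
-You can also confirm that the data is arriving by querying Prometheus directly
-over the port-forward opened earlier. A small sketch using the
-`kong_http_status` time series that is discussed in more detail below:
-
-```bash
-$ curl -sG http://localhost:9090/api/v1/query \
-    --data-urlencode 'query=sum(rate(kong_http_status[1m])) by (service)'
-# Returns a JSON payload with one sample stream per service.
-```
-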
-Try tweaking the above script to send different traffic patterns -and see how the metrics change. -The upstream services are httpbin instances, meaning you can use -a variety of endpoints to shape your traffic. - -## Metrics collected - -### Request Latencies of Various Services - -![Request latencies](/assets/images/products/kubernetes-ingress-controller/request-latencies.png) - -Kong collects latency data of how long your services take to respond to -requests. One can use this data to alert the on-call engineer if the latency -goes beyond a certain threshold. For example, let’s say you have an SLA -that your APIs will respond with latency of less than 20 millisecond -for 95% of the requests. -You could configure Prometheus to alert based on the following query: - -```text -histogram_quantile(0.95, sum(rate(kong_latency_bucket{type="request"}[1m])) by (le,service)) > 20 -``` - -The query calculates the 95th percentile of the the total request -latency (or duration) for all of your services and alerts you if it is more -than 20 milliseconds. -The “type” label in this query is “request”, which tracks the latency -added by Kong and the service. -You can switch this to “upstream” to track latency added by the service only. -Prometheus is highly flexible and well documented, so we won’t go into -details of setting up alerts here, but you’ll be able to find them -in the Prometheus documentation. - -### Kong Proxy Latency - -![Proxy latencies](/assets/images/products/kubernetes-ingress-controller/proxy-latencies.png) - -Kong also collects metrics about its performance. -The following query is similar to the previous one but gives -us insight into latency added by Kong: - -```text -histogram_quantile(0.90, sum(rate(kong_latency_bucket{type="kong"}[1m])) by (le,service)) > 2 -``` - -### Error Rates - -![Error rates](/assets/images/products/kubernetes-ingress-controller/error-rates.png) - -Another important metric to track is the rate of errors and requests -your services are serving. -The time series `kong_http_status` collects HTTP status code metrics -for each service. - -This metric can help you track the rate of errors for each of your service: - -```text -sum(rate(kong_http_status{code=~"5[0-9]{2}"}[1m])) by (service) -``` - -You can also calculate the percentage of requests in any duration -that are errors. Try to come up with a query to derive that result. - -Please note that all HTTP status codes are indexed, meaning you could use -the data to learn about your typical traffic pattern and identify problems. -For example, a sudden rise in 404 response codes could be indicative -of client codes requesting an endpoint that was removed in a recent deploy. - -### Request Rate and Bandwidth - -![Request rates](/assets/images/products/kubernetes-ingress-controller/request-rate.png) - -One can derive the total request rate for each of your services or -across your Kubernetes cluster using the `kong_http_status` time series. - -![Bandwidth](/assets/images/products/kubernetes-ingress-controller/bandwidth.png) - -Another metric that Kong keeps track of is the amount of -network bandwidth (`kong_bandwidth`) being consumed. -This gives you an estimate of how request/response sizes -correlate with other behaviors in your infrastructure. - -You now have metrics for the services running inside your Kubernetes cluster -and have much more visibility into your applications, without making -any modifications in your services. 
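-
-The queries above also translate directly into alerting rules. As a starting
-point, here is a hedged sketch of a Prometheus rule file built around the
-latency query shown earlier; the file, group, and alert names are made up, and
-how you load the file depends on how you installed Prometheus (for example via
-the chart's `serverFiles` values):
-
-```bash
-$ cat <<'EOF' > kong-latency-alert.yaml
-groups:
-- name: kong.rules            # hypothetical group name
-  rules:
-  - alert: KongP95LatencyHigh # hypothetical alert name
-    expr: histogram_quantile(0.95, sum(rate(kong_latency_bucket{type="request"}[1m])) by (le,service)) > 20
-    for: 5m
-    labels:
-      severity: warning
-    annotations:
-      summary: p95 request latency has been above 20 ms for 5 minutes
-EOF
-```
-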
-You can use Alertmanager or Grafana to now configure alerts based on -the metrics observed and your SLOs. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/redis-rate-limiting.md b/app/kubernetes-ingress-controller/1.2.x/guides/redis-rate-limiting.md deleted file mode 100644 index 64f13251f006..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/redis-rate-limiting.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Using Redis for rate-limiting ---- - -Kong can rate-limit your traffic without any external dependency. -In such a case, Kong stores the request counters in-memory -and each Kong node applies the rate-limiting policy independently. -There is no synchronization of information being done in this case. -But if Redis is available in your cluster, Kong -can take advantage of it and synchronize the rate-limit information -across multiple Kong nodes and enforce a slightly different rate-limiting -policy. - -This guide walks through the steps of using Redis for rate-limiting in -a multi-node Kong deployment. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. 
- -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Set up rate-limiting - -We will start by creating a global rate-limiting policy: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit created -``` - -Here we are configuring the {{site.kic_product_name}} to rate-limit traffic from -any client to 5 requests per minute, and we are applying this policy in a -global sense, meaning the rate-limit will apply across all services. - -You can set this up for a specific Ingress or a specific service as well, -please follow [using KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) -guide on steps for doing that. - -Next, test the rate-limiting policy by executing the following command -multiple times and observe the rate-limit headers in the response: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -As there is a single Kong instance running, Kong correctly imposes -the rate-limit and you can make only 5 requests in a minute. - -## Scale the controller to multiple pods - -Now, let's scale up the {{site.kic_product_name}} deployment to 3 pods, for -scalability and redundancy: - -```bash -$ kubectl scale --replicas 3 -n kong deployment ingress-kong -deployment.extensions/ingress-kong scaled -``` - -It will take a couple minutes for the new pods to start up. -Once the new pods are up and running, test the rate-limiting policy by -executing the following command and observing the rate-limit headers: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -You will observe that the rate-limit is not consistent anymore -and you can make more than 5 requests in a minute. - -To understand this behavior, we need to understand how we have configured Kong. -In the current policy, each Kong node is tracking a rate-limit in-memory -and it will allow 5 requests to go through for a client. -There is no synchronization of the rate-limit information across Kong nodes. -In use-cases where rate-limiting is used as a protection mechanism and to -avoid over-loading your services, each Kong node tracking its own counter -for requests is good enough as a malicious user will hit rate-limits on all -nodes eventually. 
-Or if the load-balancer in-front of Kong is performing some -sort of deterministic hashing of requests such that the same Kong node always -receives the requests from a client, then we won't have this problem at all. - -In some cases, a synchronization of information that each Kong node maintains -in-memory is needed. For that purpose, Redis can be used. -Let's go ahead and set this up next. - -## Deploy Redis to your Kubernetes cluster - -First, we will deploy redis in our Kubernetes cluster: - -```bash -$ kubectl apply -n kong -f https://bit.ly/k8s-redis -deployment.apps/redis created -service/redis created -``` - -Once this is deployed, let's update our KongClusterPlugin configuration to use -Redis as a datastore rather than each Kong node storing the counter information -in-memory: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - policy: redis - redis_host: redis -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit configured -``` - -Notice, how the `policy` is now set to `redis` and we have configured Kong -to talk to the `redis` server available at `redis` DNS name, which is the -Redis node we deployed earlier. - -## Test it - -Now, if you go ahead and execute the following commands, you should be able -to make only 5 requests in a minute: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -This guide shows how to use Redis as a data-store for rate-limiting plugin, -but this can be used for other plugins which support Redis as a data-store -like proxy-cache. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/setting-up-custom-plugins.md b/app/kubernetes-ingress-controller/1.2.x/guides/setting-up-custom-plugins.md deleted file mode 100644 index bb56a4b3b174..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/setting-up-custom-plugins.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: Setting up custom plugin in Kubernetes environment ---- - -This guide goes through steps on installing a custom plugin -in Kong without using a Docker build. - -## Prepare a directory with plugin code - -First, we need to create either a ConfigMap or a Secret with -the plugin code inside it. -If you would like to install a plugin which is available as -a rock from Luarocks, then you need to download it, unzip it and create a -ConfigMap from all the Lua files of the plugin. - -We are going to setup a dummy plugin next. -If you already have a real plugin, you can skip this step. - -```shell -$ mkdir myheader && cd myheader -$ echo 'local MyHeader = {} - -MyHeader.PRIORITY = 1000 - -function MyHeader:header_filter(conf) - -- do custom logic here - kong.response.set_header("myheader", conf.header_value) -end - -return MyHeader -' > handler.lua - -$ echo 'return { - name = "myheader", - fields = { - { config = { - type = "record", - fields = { - { header_value = { type = "string", default = "roar", }, }, - }, - }, }, - } -} -' > schema.lua -``` - -Once we have our plugin code available in a directory, -the directory should look something like this: - -```shell -$ tree myheader -myheader -├── handler.lua -└── schema.lua - -0 directories, 2 files -``` - -You might have more files inside the directory as well. - -## Create a ConfigMap or Secret with the plugin code - -Next, we are going to create a ConfigMap or Secret based on the plugin -code. 
- -Please ensure that this is created in the same namespace as the one -in which Kong is going to be installed. - -```shell -# using ConfigMap; replace `myheader` with the name of your plugin -$ kubectl create configmap kong-plugin-myheader --from-file=myheader -n kong -configmap/kong-plugin-myheader created - -# OR using Secret -$ kubectl create secret generic -n kong kong-plugin-myheader --from-file=myheader -secret/kong-plugin-myheader created -``` - -## Modify configuration - -Next, we need to update Kong's Deployment to load our custom plugin. - -Based on your installation method, this step will differ slightly. -The next section explains what changes are necessary. - -### YAML - -The following patch is necessary to load the plugin. -Notable changes: -- The plugin code is mounted into the pod via `volumeMounts` and `volumes` - configuration property. -- `KONG_PLUGINS` environment variable is set to include the custom plugin - along with all the plugins that come in Kong by default. -- `KONG_LUA_PACKAGE_PATH` environment variable directs Kong to look - for plugins in the directory where we are mounting them. - -If you have multiple plugins, simply mount multiple -ConfigMaps and include the plugin name in the `KONG_PLUGINS` -environment variable. - -> Please note that if your plugin code involves database - migration then you need to include the below patch to pod definition of your - migration Job as well. - -Please note that the below is not a complete definition of -the Deployment but merely a strategic patch which can be applied to -an existing Deployment. - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ingress-kong - namespace: kong -spec: - template: - spec: - containers: - - name: proxy - env: - - name: KONG_PLUGINS - value: bundled,myheader - - name: KONG_LUA_PACKAGE_PATH - value: "/opt/?.lua;;" - volumeMounts: - - name: kong-plugin-myheader - mountPath: /opt/kong/plugins/myheader - volumes: - - name: kong-plugin-myheader - configMap: - name: kong-plugin-myheader -``` - -### Helm chart - -With Helm, this is as simple as adding the following values to -your `values.yaml` file: - -```yaml -# values.yaml -plugins: - configMaps: # change this to 'secrets' if you created a secret - - name: kong-plugin-myheader - pluginName: myheader -``` - -The chart automatically configures all the environment variables based on the -plugins you inject. - -Please ensure that you add in other configuration values -you might need for your installation to work. - -### Deploy - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Once, you have all the pieces in place, you are ready -to deploy the {{site.kic_product_name}}: - -```shell -# using YAML or kustomize -kustomize build github.com/hbagdi/yaml/kong/kong-custom-plugin | kubectl apply -f - - -# or helm -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong --values values.yaml - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false --values values.yaml -``` - -Once you have setup Kong with the custom plugin installed, you can use it -like any other plugin. 
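-
-Before wiring it up, it can be worth double-checking that the Deployment
-actually picked up the plugin. A small sketch against the `ingress-kong`
-Deployment and `proxy` container names used in the YAML example above (your
-names may differ, particularly with a Helm install):
-
-```bash
-$ kubectl rollout status deployment/ingress-kong -n kong
-$ kubectl get deployment ingress-kong -n kong \
-    -o jsonpath='{.spec.template.spec.containers[?(@.name=="proxy")].env[?(@.name=="KONG_PLUGINS")].value}'
-bundled,myheader
-```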
- -First, create a `KongPlugin` custom resource: - -```yaml -echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: my-custom-plugin -config: - header_value: "my first plugin" -plugin: myheader -" | kubectl apply -f - -``` - -and then can annotate an Ingress or Service resource to instruct -Kong on when to execute the plugin: - -```yaml -konghq.com/plugins: my-custom-plugin -``` - -Once you have got Kong up and running, configure your -custom plugin via [KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/). - - -### Plugins in other languages - -When deploying custom plugins in other languages, especially Golang, the built binary is larger than -the size limit of ConfigMap. In such cases, consider using an init container to pull large binaries from -remotes like S3 buckets, or build a custom image that includes plugin runtimes and the plugin itself. - -To read more about building a custom image, see -[use external plugins in container and Kubernetes](/gateway/latest/reference/external-plugins/#use-external-plugins-in-container-and-kubernetes). diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/upstream-mtls.md b/app/kubernetes-ingress-controller/1.2.x/guides/upstream-mtls.md deleted file mode 100644 index 7a6e50c40656..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/upstream-mtls.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Using mTLS with Kong ---- - -This guide walks through on how to setup Kong to perform mutual-TLS -authentication with an upstream service. - -> Please note that this guide walks through mTLS configuration between -Kong and a Service and not Kong and a client or consumer. - -## What is mTLS? - -Mutual authentication refers to two-way authencation, where the client and -server, both can authenticate themselves to the other party. - -With mutual TLS authentication, client and server both present TLS -certificates to the other party (and can prove their identity using their -private key) during the TLS handshake. They can verify the other's -certificate using the their trusted CAs. - -## mTLS with Kong - -Kong 1.3 and above support mutual TLS authentication between Kong and the -upstream service. - -Let's take a look at how one can configure it. - -## Configure Kong to verify upstream server certificate - -Kong, by default, does not verify the certificate presented by the upstream -service. - -To enforce certificate verification, you need to configure the following -environment variables on Kong's container in your deployment: - -``` -KONG_NGINX_PROXY_PROXY_SSL_VERIFY="on" -KONG_NGINX_PROXY_PROXY_SSL_VERIFY_DEPTH="3" -KONG_NGINX_PROXY_PROXY_SSL_TRUSTED_CERTIFICATE="/path/to/ca_certs.pem" -``` - -These basically translate to -[NGINX directives](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) -to configure NGINX to verify certificates. - -Please make sure that the trusted certificates are correctly -mounted into Kong's container and the path to certificate is correctly -reflected in the above environment variable. - -## Configure Kong to present its certificate to the upstream server - -In the above section, we achieved one side of mutual authentication, -where Kong has been configured to verify the identity of the upstream server. - -In this section, we will configure Kong to present its identity to the -upstream server. - -To configure this, you have two options, depending on your use-case. 
-If you would like Kong to present its client certificate to each and every -service that it talks to, you can configure the client certificate -at the global level using Nginx directives. -If you would like to configure a different certificate for -each service that Kong talks to or want to configure Kong to present a -client certificate only to a subset of all services that it is configured to -communicate with, then you can configure that using an annotation on -the Kubernetes Service resource. - -### Global Nginx directive - -You need to configure two Nginx directives for this purpose: -- [`proxy_ssl_certificate`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate) -- [`proxy_ssl_certificate_key`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_key) - -You can mount the certificate and key pair using secrets into the Kong pod -and then set the following two environment variables to set the above two -directives: - -``` -KONG_NGINX_PROXY_PROXY_SSL_CERTIFICATE="/path/to/client_cert.pem" -KONG_NGINX_PROXY_PROXY_SSL_CERTIFICATE_KEY="/path/to/key.pem" -``` - -Once configured, Kong will present its client certificate to every upstream -server that it talks to. - -### Per service annotation - -To configure a different client certificate for each service or only for a -subset of services, you can do so using the -[`konghq.com/client-cert`](/kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcom/client-cert) -annotation. - -To use the annotation, you first need to create a TLS secret with the -client certificate and key in Kubernetes. -The secret should be created in the same namespace as your Kubernetes -Service to which Kong should authenticate itself. - -Once the secret is in place, add the follow annotation on the service: - -``` -konghq.com/client-cert: -``` - -Kong will then use the TLS key-pair to authenticate itself against that service. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-consumer-credential-resource.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-consumer-credential-resource.md deleted file mode 100644 index 8c1acc30e7f8..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-consumer-credential-resource.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Provisioning Consumers and Credentials ---- - -This guide walks through how to use the KongConsumer custom -resource and use Secret resources to associate credentials with those -consumers. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
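-
-If the request hangs or fails instead of returning a 404, it is usually worth
-confirming that the proxy Service has an address and that the Kong pods are
-running before going further. A quick hedged sketch, assuming Kong was
-installed into the `kong` namespace as in the deployment guides:
-
-```bash
-$ kubectl get service kong-proxy -n kong
-$ kubectl get pods -n kong
-```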
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Add authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. - -Let's add a KongPlugin resource to protect the API: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: httpbin-auth -plugin: key-auth -" | kubectl apply -f - -kongplugin.configuration.konghq.com/httpbin-auth created -``` - -Now, associate this plugin with the previous Ingress rule we created -using the `konghq.com/plugins` annotation: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - konghq.com/plugins: httpbin-auth - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Any request matching the proxying rules defined in the `demo` ingress will -now require a valid API key: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 19:30:33 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -Server: kong/1.2.1 - -{"message":"No API key found in request"} -``` - -As you can see above, Kong returns back a `401 Unauthorized` because -we didn't provide an API key. - -## Provision a Consumer - -Let's create a KongConsumer resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, let's provision an API-key associated with -this consumer so that we can pass the authentication imposed by Kong: - -Next, we will create a [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) -resource with an API-key inside it: - -```bash -$ kubectl create secret generic harry-apikey \ - --from-literal=kongCredType=key-auth \ - --from-literal=key=my-sooper-secret-key -secret/harry-apikey created -``` - -The type of credential is specified via `kongCredType`. -You can create the Secret using any other method as well. - -Since we are using the Secret resource, -Kubernetes will encrypt and store this API-key for us. 
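-
-If you want to double-check what was stored, you can read the Secret back; the
-values are kept base64-encoded, so decode them to compare. A small sketch using
-the Secret and field names from above:
-
-```bash
-$ kubectl get secret harry-apikey -o jsonpath='{.data.key}' | base64 --decode; echo
-my-sooper-secret-key
-```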
- -Next, we will associate this API-key with the consumer we created previously. - -Please note that we are not re-creating the KongConsumer resource but -only updating it to add the `credentials` array: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry -credentials: -- harry-apikey" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry configured -``` - -## Use the credential - -Now, use the credential to pass authentication: - -```bash -$ curl -i -H 'apikey: my-sooper-secret-key' $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:34:44 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -In this guide, we learned how to leverage an authentication plugin in Kong -and provision credentials. This enables you to offload authentication into -your Ingress layer and keeps the application logic simple. - -All other authentication plugins bundled with Kong work in this -way and can be used to quickly add an authentication layer on top of -your microservices. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-external-service.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-external-service.md deleted file mode 100644 index 3e47325ff76c..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-external-service.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Expose an external application ---- - -This example shows how we can expose a service located outside the Kubernetes cluster using an Ingress. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Create a Kubernetes service - -First we need to create a Kubernetes Service [type=ExternalName][0] using the hostname of the application we want to expose. 
- -```bash -echo " -kind: Service -apiVersion: v1 -metadata: - name: proxy-to-httpbin -spec: - ports: - - protocol: TCP - port: 80 - type: ExternalName - externalName: httpbin.org -" | kubectl create -f - -``` - -## Create an Ingress to expose the service at the path `/foo` - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: proxy-from-k8s-to-httpbin - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: proxy-to-httpbin - servicePort: 80 -' | kubectl create -f - -``` - -## Test the service - -```bash -$ curl -i $PROXY_IP/foo -``` - -[0]: https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-ingress-with-grpc.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-ingress-with-grpc.md deleted file mode 100644 index 64995357181b..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-ingress-with-grpc.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Using Ingress with gRPC ---- - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Pre-requisite - -To make `gRPC` requests, you need a client which can invoke gRPC requests. -In this guide, we use -[`grpcurl`](https://github.com/fullstorydev/grpcurl#installation). -Please ensure that you have that installed in on your local system. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -#### Running GRPC - -1. Add a grpc deployment and service - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/sample-apps/grpc.yaml -service/grpcbin created -deployment.apps/grpcbin created -``` -1. Create a demo grpc ingress rule: - -```bash -$ echo "apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: / - backend: - serviceName: grpcbin - servicePort: 9001" | kubectl apply -f - -ingress.extensions/demo created -``` -1. Next, we need to update the Ingress rule to specify gRPC as the protocol. -By default, all routes are assumed to be either HTTP or HTTPS. This annotation -informs Kong that this route is a gRPC(s) route and not a plain HTTP route: - -``` -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"grpc,grpcs"}}}' -``` - -1. Next, we also update the upstream protocol to be `grpcs`. -Similar to routes, Kong assumes that services are HTTP-based by default. 
-With this annotation, we configure Kong to use gRPCs protocol when it -talks to the upstream service: - -``` -$ kubectl patch svc grpcbin -p '{"metadata":{"annotations":{"konghq.com/protocol":"grpcs"}}}' -``` - -1. You should be able to run a request over `gRPC`: - -``` -$ grpcurl -v -d '{"greeting": "Kong Hello world!"}' -insecure $PROXY_IP:443 hello.HelloService.SayHello -``` diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-kong-with-knative.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-kong-with-knative.md deleted file mode 100644 index 7fe55abf04fc..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-kong-with-knative.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: Using Kong with Knative ---- - -The {{site.kic_product_name}} supports managing ingress traffic for -serverless workloads managed via Knative. - -In this guide, we will learn how to use Kong with Knative services and -configure plugins for Knative services. - - -## Pre-requisite - -This guide will be easier to follow if you have access to a Kubernetes -cluster that is running in the cloud rather than Minikube or any other -local environment. The guide requires access to DNS and a public IP -address or DNS name will certainly keep things simpler and easy for you. - -## Install Knative - -If you don't have knative installed, you need to install Knative: - -``` -kubectl apply --filename https://github.com/knative/serving/releases/download/v0.13.0/serving-crds.yaml -kubectl apply --filename https://github.com/knative/serving/releases/download/v0.13.0/serving-core.yaml -``` - -This will install the resources that are required to run Knative. - -## Install Kong - -Next, install the {{site.kic_product_name}}: - -``` -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -``` - -You can choose to install a different flavor, like using a database, -or using an Enterprise installation instead of Open-Source. You can also -use Helm installation method if that works for you. - -Once Kong is installed, -you should note down the IP address or public CNAME of -`kong-proxy` service. - -In the current case case, - -```shell -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.248.154 35.247.39.83 80:30345/TCP,443:31872/TCP 53m -``` - -Take a note of the above IP address "`35.247.39.83`". This will be different -for your installation. - -## Configure Knative to use Kong for Ingress - -### Ingress class - -Next, we will configure Knative to use `kong` as the Ingress class: - -``` -$ kubectl patch configmap/config-network \ - --namespace knative-serving \ - --type merge \ - --patch '{"data":{"ingress.class":"kong"}}' -``` - -## Setup Knative domain - -As the final step, we need to configure Knative's base domain at which -our services will be accessible. - -We override the default ConfigMap with the DNS name of `${KONG_IP}.xip.io`. -This will be different for you: - -``` -$ echo ' -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-domain - namespace: knative-serving - labels: - serving.knative.dev/release: v0.13.0 -data: - 35.247.39.83.xip.io: "" -' | kubectl apply -f - -configmap/config-domain configured -``` - -Once this is done, the setup is complete and we can move onto using Knative -and Kong. 
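-
-It can be reassuring to read the two ConfigMap values back before moving on.
-A small sketch (the backslash-escaped dot is needed because the key itself
-contains a dot):
-
-```bash
-$ kubectl get configmap config-network -n knative-serving \
-    -o jsonpath='{.data.ingress\.class}'; echo
-kong
-
-$ kubectl get configmap config-domain -n knative-serving -o jsonpath='{.data}'; echo
-```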
- -## Test connectivity to Kong - -Send a request to the above domain that we have configured: - -```bash -curl -i http://35.247.39.83.xip.io/ -HTTP/1.1 404 Not Found -Date: Wed, 11 Mar 2020 00:18:49 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -X-Kong-Response-Latency: 1 -Server: kong/1.4.3 - -{"message":"no Route matched with those values"} -``` - -The 404 response is expected since we have not configured any services -in Knative yet. - -## Install a Knative Service - -Let's install our first Knative service: - -``` -$ echo " -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: helloworld-go - namespace: default -spec: - template: - spec: - containers: - - image: gcr.io/knative-samples/helloworld-go - env: - - name: TARGET - value: Go Sample v1 -" | kubectl apply -f - -``` - -It can take a couple of minutes for everything to get configured but -eventually, you will see the URL of the Service. -Let's make the call to the URL: - -```shell -$ curl -v http://helloworld-go.default..xip.io -HTTP/1.1 200 OK -Content-Type: text/plain; charset=utf-8 -Content-Length: 20 -Connection: keep-alive -Date: Tue, 10 Mar 2020 23:45:14 GMT -X-Kong-Upstream-Latency: 2723 -X-Kong-Proxy-Latency: 0 -Via: kong/1.4.3 - -Hello Go Sample v1! -``` - -The request is served by Knative and from the response HTTP headers, -we can tell that the request was proxied by Kong. - -The first request will also take longer to complete as Knative will spin -up a new Pod to service the request. -We can see how Kong observed this latency and recorded it in the -`X-Kong-Upstream-Latency` header. -If you perform subsequent requests, -they should complete much faster. - -## Plugins for knative services - -Let's now execute a plugin for our new Knative service. - -First, let's create a KongPlugin resource: - -```shell -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: add-response-header -config: - add: - headers: - - "demo: injected-by-kong -plugin: response-transformer -" | kubectl apply -f - -kongplugin.configuration.konghq.com/add-response-header created -``` - -Next, we will update the Knative service created before and add in -annotation in the template: - -```shell -$ echo " -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: helloworld-go - namespace: default -spec: - template: - metadata: - annotations: - konghq.com/plugins: add-response-header - spec: - containers: - - image: gcr.io/knative-samples/helloworld-go - env: - - name: TARGET - value: Go Sample v1 -" | kubectl apply -f - -service.serving.knative.dev/helloworld-go configured -``` - -Please note that the annotation `konghq.com/plugins` is -not added to the Service definition -itself but to the `spec.template.metadata.annotations`. - -Let's make the request again: - -```shell -$ curl -i http://helloworld-go.default.35.247.39.83.xip.io/ -HTTP/1.1 200 OK -Content-Type: text/plain; charset=utf-8 -Content-Length: 20 -Connection: keep-alive -Date: Wed, 11 Mar 2020 00:35:07 GMT -demo: injected-by-kong -X-Kong-Upstream-Latency: 2455 -X-Kong-Proxy-Latency: 1 -Via: kong/1.4.3 - -Hello Go Sample v1! -``` - -As we can see, the response has the `demo` header injected. - -This guide demonstrates the power of using Kong and Knative together. -Checkout other plugins and try them out with multiple Knative services. -The possibilities are endless! 
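-
-For instance, the same annotation mechanism can attach a rate-limiting plugin
-to the Knative service. A minimal hedged sketch: create the plugin in the
-`default` namespace alongside the Knative service, then list it in the
-existing `konghq.com/plugins` annotation on the service template as
-`add-response-header,rl-by-ip`:
-
-```bash
-$ echo "
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: rl-by-ip
-config:
-  minute: 5
-  limit_by: ip
-  policy: local
-plugin: rate-limiting
-" | kubectl apply -f -
-kongplugin.configuration.konghq.com/rl-by-ip created
-```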
diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-kongclusterplugin-resource.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-kongclusterplugin-resource.md deleted file mode 100644 index 141f2043f806..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-kongclusterplugin-resource.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -title: Using KongClusterPlugin resource ---- - -In this guide, we will learn how to use KongClusterPlugin resource to configure -plugins in Kong. -The guide will cover configuring a plugin for services across different -namespaces. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Installing sample services - -We will start by installing two services, -an echo service and an httpbin service in their corresponding namespaces. - -```bash -$ kubectl create namespace httpbin -namespace/httpbin created -$ kubectl apply -n httpbin -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -```bash -$ kubectl create namespace echo -namespace/echo created -$ kubectl apply -n echo -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress rules - -Let's expose these services outside the Kubernetes cluster -by defining Ingress rules. 
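Before defining the Ingress rules below, it can help to wait for both Deployments to finish rolling out; an optional check, not part of the original steps:

```bash
# block until each sample workload reports a successful rollout
kubectl -n httpbin rollout status deployment/httpbin
kubectl -n echo rollout status deployment/echo
```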
- -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: httpbin-app - namespace: httpbin - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created - -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: echo-app - namespace: echo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test these endpoints: - -```bash -# access httpbin service -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:38:00 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -# access echo service -$ curl -i $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 -<-- clipped -- > -``` - -## Create KongClusterPlugin resource - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: add-response-header - annotations: - kubernetes.io/ingress.class: kong -config: - add: - headers: - - "demo: injected-by-kong" -plugin: response-transformer -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/add-response-header created -``` - -Note how the resource is created at cluster-level and not in any specific -namespace: - -```bash -$ kubectl get kongclusterplugins -NAME PLUGIN-TYPE AGE -add-response-header response-transformer 4s -``` - -If you send requests to `PROXY_IP` now, you will see that the header is not -injected in the responses. The reason being that we have created a -resource but we have not told Kong when to execute the plugin. - -## Configuring plugins on Ingress resources - -We will associate the `KongClusterPlugin` resource with the two Ingress resources -that we previously created: - -```bash -$ kubectl patch ingress -n httpbin httpbin-app -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/httpbin-app patched - -$ kubectl patch ingress -n echo echo-app -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/echo-app patched -``` - -Here, we are asking the {{site.kic_product_name}} to execute the response-transformer -plugin whenever a request matching any of the above two Ingress rules is -processed. 
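If the header does not appear in the next step, a quick sanity check is to read the annotation back from both Ingress resources; the escaped dots are required because the annotation key itself contains `.`:

```bash
# both commands should print "add-response-header"
kubectl get ingress -n httpbin httpbin-app \
  -o jsonpath='{.metadata.annotations.konghq\.com/plugins}'
kubectl get ingress -n echo echo-app \
  -o jsonpath='{.metadata.annotations.konghq\.com/plugins}'
```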
- -Let's test it out: - -```bash -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:54:31 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:54:39 GMT -Server: echoserver -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As can be seen in the output, the `demo` header is injected by Kong when -the request matches the Ingress rules defined in our two Ingress rules. - -## Updating plugin configuration - -Now, let's update the plugin configuration to change the header value from -`injected-by-kong` to `injected-by-kong-for-kubernetes`: - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: add-response-header - annotations: - kubernetes.io/ingress.class: kong -config: - add: - headers: - - "demo: injected-by-kong-for-kubernetes" -plugin: response-transformer -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/add-response-header configured -``` - -If you repeat the requests from the last step, you will see Kong -now responds with updated header value. - -This guides demonstrates how plugin configuration can be shared across -services running in different namespaces. -This can prove to be useful if the persona controlling the plugin -configuration is different from service owners that are responsible for the -Service and Ingress resources in Kubernetes. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-kongingress-resource.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-kongingress-resource.md deleted file mode 100644 index de12faa0ea8f..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-kongingress-resource.md +++ /dev/null @@ -1,251 +0,0 @@ ---- -title: Using KongIngress resource ---- - -In this guide, we will learn how to use KongIngress resource to control -proxy behavior. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Install a dummy service - -We will start by installing the echo service. - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress - -Let's expose the echo service outside the Kubernetes cluster -by defining an Ingress. 
- -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test: - -```bash -$ curl -i $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/foo - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/foo -``` - -## Use KongIngress with Ingress resource - -By default, Kong will proxy the entire path to the service. -This can be seen in the real path value in the above response. - -We can configure Kong to strip out the part of the path defined in the -Ingress rule and to only respond to GET requests for this particular rule. - -To modify these behaviours, let's first create a KongIngress resource -defining the new behaviour: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: sample-customization -route: - methods: - - GET - strip_path: true" | kubectl apply -f - -kongingress.configuration.konghq.com/test created -``` - -Now, let's associate this KongIngress resource with our Ingress resource -using the `konghq.com/override` annotation. - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/override":"sample-customization"}}}' -ingress.extensions/demo patched -``` - -Now, Kong will proxy only GET requests on `/foo` path and -strip away `/foo`: - -```bash -$ curl -s $PROXY_IP/foo -X POST -{"message":"no Route matched with those values"} - - -$ curl -s $PROXY_IP/foo/baz - - -Hostname: echo-d778ffcd8-vrrtw - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-vrrtw - pod namespace: default - pod IP: 10.60.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/baz - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/baz -``` - -As you can see, the real path value is `/baz`. - -## Use KongIngress with Service resource - -KongIngress can be used to change load-balancing, health-checking and other -proxy behaviours in Kong. - -Next, we are going to tweak two settings: - -- Configure Kong to hash the requests based on IP address of the client. -- Configure Kong to proxy all the request on `/foo` to `/bar`. - -Let's create a KongIngress resource with these settings: - -```bash -$ echo 'apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-customization -upstream: - hash_on: ip -proxy: - path: /bar/' | kubectl apply -f - -kongingress.configuration.konghq.com/demo-customization created -``` - -Now, let's associate this KongIngress resource to the echo service. 
- -```bash -$ kubectl patch service echo -p '{"metadata":{"annotations":{"konghq.com/override":"demo-customization"}}}' -service/echo patched -``` - -Let's test this now: - -```bash -$ curl $PROXY_IP/foo/baz -Hostname: echo-d778ffcd8-vrrtw - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-vrrtw - pod namespace: default - pod IP: 10.60.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/bar/baz - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/bar/baz - -<-- clipped --> -``` - -Real path received by the upstream service (echo) is now changed to `/bar/baz`. - -Also, now all the requests will be sent to the same upstream pod: - -```bash -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -``` - - -You can experiement with various load balancing and healthchecking settings -that KongIngress resource exposes to suit your specific use case. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-kongplugin-resource.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-kongplugin-resource.md deleted file mode 100644 index 02ce7664b7a4..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-kongplugin-resource.md +++ /dev/null @@ -1,469 +0,0 @@ ---- -title: Using KongPlugin resource ---- - -In this guide, we will learn how to use KongPlugin resource to configure -plugins in Kong to modify requests for a specific request path. -The guide will cover configuring a plugin for a specific service, a set of Ingress rules -and for a specific user of the API. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Installing sample services - -We will start by installing two services, -an echo service and an httpbin service. 
- -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress rules - -Let's expose these services outside the Kubernetes cluster -by defining Ingress rules. - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 - - path: /bar - backend: - serviceName: echo - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test these endpoints: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:38:00 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - - - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 -<-- clipped -- > -``` - -Let's add another Ingress resource which proxies requests to `/baz` to httpbin -service: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-2 - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /baz - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo-2 created -``` - -We will use this path later. - -## Configuring plugins on Ingress resource - -Next, we will configure two plugins on the Ingress resource. - -First, we will create a KongPlugin resource: - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: add-response-header -config: - add: - headers: - - "demo: injected-by-kong" -plugin: response-transformer -' | kubectl apply -f - -kongplugin.configuration.konghq.com/add-response-header created -``` - -Next, we will associate it with our Ingress rules: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/demo patched -``` - -Here, we are asking the {{site.kic_product_name}} to execute the response-transformer -plugin whenever a request matching the Ingress rule is processed. 
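Optionally, confirm the resource exists before testing. `KongPlugin` is namespaced, so list it in the namespace where it was created (the default namespace here); the printer columns you see depend on the installed CRD version:

```bash
# the add-response-header plugin should appear in the listing
kubectl get kongplugins
```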
- -Let's test it out: - -```bash -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:54:31 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:54:39 GMT -Server: echoserver -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As can be seen in the output, the `demo` header is injected by Kong when -the request matches the Ingress rules defined in the `demo` Ingress resource. - -If we send a request to `/baz`, then we can see that the header is not injected -by Kong: - -```bash -$ curl -I $PROXY_IP/baz -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:56:20 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Here, we have successfully setup a plugin which is executed only when a -request matches a specific `Ingress` rule. - -## Configuring plugins on Service resource - -Next, we will see how we can configure Kong to execute plugins for requests -which are sent to a specific service. - -Let's add a `KongPlugin` resource for authentication on the httpbin service: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: httpbin-auth -plugin: key-auth -" | kubectl apply -f - - -kongplugin.configuration.konghq.com/httpbin-auth created -``` - -Next, we will associate this plugin to the httpbin service running in our -cluster: - -```bash -$ kubectl patch service httpbin -p '{"metadata":{"annotations":{"konghq.com/plugins":"httpbin-auth"}}}' -service/httpbin patched -``` - -Now, any request sent to the service will require authentication, -no matter which `Ingress` rule it matched: - -```bash -$ curl -I $PROXY_IP/baz -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 22:09:04 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -Server: kong/1.2.1 - -$ curl -I $PROXY_IP/foo -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 22:12:13 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -demo: injected-by-kong -Server: kong/1.2.1 -``` - -You can also see how the `demo` header was injected as the request also -matched one of the rules defined in the `demo` `Ingress` resource. - -## Configure consumer and credential - -Follow the [Using Consumers and Credentials](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource/) -guide to provision a user and an apikey. 
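For reference, the end state of that guide boils down to a credential Secret plus a `KongConsumer` that references it. Here is a condensed sketch using the same names that appear later in this guide (`harry`, `harry-apikey`, `my-sooper-secret-key`); the linked guide remains the authoritative set of steps:

```bash
# create the key-auth credential as a Secret
kubectl create secret generic harry-apikey \
  --from-literal=kongCredType=key-auth \
  --from-literal=key=my-sooper-secret-key

# create the consumer and attach the credential to it
echo "apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: harry
  annotations:
    kubernetes.io/ingress.class: kong
username: harry
credentials:
- harry-apikey" | kubectl apply -f -
```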
-Once you have it, please continue: - -Use the API key to pass authentication: - -```bash -$ curl -I $PROXY_IP/baz -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:16:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:15:34 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 -``` - -## Configure a global plugin - -Now, we will protect our Kubernetes cluster. -For this, we will be configuring a rate-limiting plugin, which -will throttle requests coming from the same client. - -This must be a cluster-level `KongClusterPlugin` resource, as `KongPlugin` -resources cannot be applied globally, to preserve Kubernetes RBAC guarantees -for cross-namespace isolation. - -Let's create the `KongClusterPlugin` resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - limit_by: consumer - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit created -``` - -With this plugin (please note the `global` label), every request through -the {{site.kic_product_name}} will be rate-limited: - -```bash -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:34:10 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 22:34:14 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Configure a plugin for a specific consumer - -Now, let's say we would like to give a specific consumer a higher rate-limit. - -For this, we can create a `KongPlugin` resource and then associate it with -a specific consumer. 
- -First, create the `KongPlugin` resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: harry-rate-limit -config: - minute: 10 - limit_by: consumer - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongplugin.configuration.konghq.com/harry-rate-limit created -``` - -Next, associate this with the consumer: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong - konghq.com/plugins: harry-rate-limit -username: harry -credentials: -- harry-apikey" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry configured -``` - -Note the annotation being added to the `KongConsumer` resource. - -Now, if the request is made as the `harry` consumer, the client -will be rate-limited differently: - -```bash -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:34:10 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-RateLimit-Limit-minute: 10 -X-RateLimit-Remaining-minute: 9 -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -# a regular unauthenticated request -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 22:34:14 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -This guide demonstrates how you can use the {{site.kic_product_name}} to -impose restrictions and transformations -on various levels using Kubernetes style APIs. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-mtls-auth-plugin.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-mtls-auth-plugin.md deleted file mode 100644 index ab361aeda05c..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-mtls-auth-plugin.md +++ /dev/null @@ -1,320 +0,0 @@ ---- -title: Using mtls-auth plugin ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -verify client certificates using CA certificates and -[mtls-auth](https://docs.konghq.com/hub/kong-inc/mtls-auth/) plugin -for HTTPS requests. - -> Note: You need an Enterprise license to use this feature. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -Kong for Kubernetes Enterprise on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise) to configure -this environment variable. - -If everything is set up correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
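Because `mtls-auth` ships only with Enterprise images, it is worth confirming that the proxy you are testing against is an Enterprise build before going further. One rough check, based on the version string Kong reports in its response headers:

```bash
# an Enterprise proxy reports a version such as "kong/2.0.4.0-enterprise-k8s";
# an open-source build reports a plain "kong/<version>" string
curl -sI $PROXY_IP | grep -i '^server:'
```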
- -## Provision a CA certificate in Kong - -CA certificates in Kong are provisioned by create a `Secret` resource in -Kubernetes. - -The secret resource must have a few properties: -- It must have the `konghq.com/ca-cert: "true"` label. -- It must have a `cert` data property which contains a valid CA certificate - in PEM format. -- It must have an `id` data property which contains a random UUID. -- It must have a `kubernetes.io/ingress.class` annotation whose value matches - the value of the controller's `--ingress-class` argument. By default, that - value is "kong". - -Note that a self-signed CA certificate is being used for the purpose of this -guide. You should use your own CA certificate that is backed by -your PKI infrastructure. - -**This example is used to show the YAML format of a `Secret` resource for the CA certificate. DO NOT directly use the certificate here. -You should use your own CA certificate, or generate a self-signed certificate for testing.** To generate self-signed CA certificates, follow these instructions: - -```bash -openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes\ - -subj "/C=US/ST=California/L=San Francisco/O=Kong/OU=Org/CN=www.example.com" -``` - -```bash -$ echo "apiVersion: v1 -kind: Secret -metadata: - name: my-ca-cert - annotations: - kubernetes.io/ingress.class: kong - labels: - konghq.com/ca-cert: 'true' -type: Opaque -stringData: - cert: | - -----BEGIN CERTIFICATE----- - MIICwTCCAamgAwIBAgIUHGUzUWvHJHrREvIZIcORiFUvze4wDQYJKoZIhvcNAQEL - BQAwEDEOMAwGA1UEAwwFSGVsbG8wHhcNMjAwNTA4MjExODA1WhcNMjAwNjA3MjEx - ODA1WjAQMQ4wDAYDVQQDDAVIZWxsbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC - AQoCggEBANCMMBngjuTvqts8ZXtZhqdr181QH/NmytW1KlyqZd6ppXUer+i0OWhP - 1nAyHsBPJljKAFLd8l1EioPFkN78/wJFDJrHOtfniIQPVLdS2cnNQ72dLyQH6smH - JQDV8ePBQ2GdRP6s61+Da8eoaW6nSLtmEUhxvyteboqwmi2CtUtAfuiU1m5sOdpS - z+L4D08CE+SFIT4MGD3gxNdg7lccWCHIfk54VRSdGDKEVwed8OQvxD0TdpHY+ym5 - nJ4JSkhiS9XIodnxR3AZ6rIPRqk+MQ4LGTjX2EbM0/Yg4qvnZ7m4fcpK2goDZIVL - EF8F+ka1RaAYWTsXI1BAkJbb3kdo/yUCAwEAAaMTMBEwDwYDVR0TBAgwBgEB/wIB - ADANBgkqhkiG9w0BAQsFAAOCAQEAVvB/PeVZpeQ7q2IQQQpADtTd8+22Ma3jNZQD - EkWGZEQLkRws4EJNCCIvkApzpx1GqRcLLL9lbV+iCSiIdlR5W9HtK07VZ318gpsG - aTMNrP9/2XWTBzdHWaeZKmRKB04H4z7V2Dl58D+wxjdqNWsMIHeqqPNKGamk/q8k - YFNqNwisRxMhU6qPOpOj5Swl2jLTuVMAeGWBWmPGU2MUoaJb8sc2Vix9KXcyDZIr - eidkzkqSrjNzI0yJ2gdCDRS4/Rw9iV3B3SRMs0mJMLBDrsowhNfLAd8I3NHzLwps - dZFcvZcT/p717K3hlFVdjGnKIgKcG7aYji/XRR87HKnc+cJMCw== - -----END CERTIFICATE----- - id: cce8c384-721f-4f58-85dd-50834e3e733a" | kubectl create -f - -secret/my-ca-cert created -``` - -Please note the ID, you can use this ID one or use a different one but -the ID is important in the next step when we create the plugin. -Each CA certificate that you create needs a unique ID. -Any random UUID will suffice here and it doesn't have an security -implication. - -You can use [uuidgen](https://linux.die.net/man/1/uuidgen) (Linux, OS X) or -[New-Guid](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/new-guid) -(Windows) to generate an ID. 
- -For example: -```bash -$ uuidgen -907821fc-cd09-4186-afb5-0b06530f2524 -``` - -## Configure mtls-auth plugin - -Next, we are going to create an `mtls-auth` KongPlugin resource which references -CA certificate provisioned in the last step: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: mtls-auth -config: - ca_certificates: - - cce8c384-721f-4f58-85dd-50834e3e733a - skip_consumer_lookup: true - revocation_check_mode: SKIP -plugin: mtls-auth -" | kubectl apply -f - -kongplugin.configuration.konghq.com/mtls-auth created -``` - -## Install a dummy service - -Let's deploy an echo service which we wish to protect -using TLS client certificate authentication. - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -You can deploy a different service or skip this step if you already -have a service deployed in Kubernetes. - -## Set up Ingress - -Let's expose the echo service outside the Kubernetes cluster -by defining an Ingress. - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/plugins: mtls-auth - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -## Test the endpoint - -Now, let's test to see if Kong is asking for client certificate -or not when we make the request: - -``` -$ curl -k https://$PROXY_IP -HTTP/2 401 -date: Mon, 11 May 2020 18:15:05 GMT -content-type: application/json; charset=utf-8 -content-length: 50 -x-kong-response-latency: 0 -server: kong/2.0.4.0-enterprise-k8s - -{"message":"No required TLS certificate was sent"} -``` - -As we can see, Kong is restricting the request because it doesn't -have the necessary authentication information. - -Two things to note here: -- `-k` is used because Kong is set up to serve a self-signed certificate - by default. For full mutual authentication in production use cases, - you must configure Kong to serve a certificate that is signed by a trusted CA. -- For some deployments `$PROXY_IP` might contain a port that points to - `http` port of Kong. In others, it might happen that it contains a DNS name - instead of an IP address. If needed, please update the - command to send an `https` request to the `https` port of Kong or - the load balancer in front of it. - - -## Provisioning credential - -Next, in order to authenticate against Kong, create the client -certificate and private key with the following content: - -{:.important} ->This example is only used to show the format of the client certificate and private key. **DO NOT** use the certificate and private key directly. -You should use a certificate and private key signed by your own CA. 
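For a test environment, one way to produce such a pair is to sign a short-lived client certificate with the self-signed CA generated earlier (`cert.pem` and `key.pem` from the `openssl req` command above). The output file names below are assumptions chosen to match the `curl` command used later:

```bash
# generate a client key and certificate signing request
openssl req -new -newkey rsa:4096 -nodes \
  -keyout client.key -out client.csr -subj "/CN=demo-client"

# sign the CSR with the CA created earlier (test use only)
openssl x509 -req -in client.csr -CA cert.pem -CAkey key.pem \
  -CAcreateserial -out client.crt -days 365 -sha256
```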
- -```bash -$ cat client.crt ------BEGIN CERTIFICATE----- -MIIEFTCCAv0CAWUwDQYJKoZIhvcNAQELBQAwEDEOMAwGA1UEAwwFSGVsbG8wHhcN -MjAwNTA4MjE0OTE1WhcNMjEwNTA4MjE0OTE1WjCBkDELMAkGA1UEBhMCQVUxEzAR -BgNVBAgMClNvbWUtU3RhdGUxDTALBgNVBAcMBHNvbWUxETAPBgNVBAoMCHNvbWUg -b3JnMRAwDgYDVQQLDAdvcmd1bml0MRswGQYDVQQDDBJleGFtcGxlLmtvbmdocS5j -b20xGzAZBgkqhkiG9w0BCQEWDGZvb0Bzb21lLmNvbTCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBAM/y80ppzwGYS7zl+A6fx4Xkjwja+ZUK/AoBDazS3TkR -W1tDFZ71koLd60qK2W1d9Wh0/F3iNTcobVefr02mEcLtl+d4zUug+W7RsK/8JSCM -MIDVDYzlTWdd7RJzV1c/0NFZyTRkEVSjGn6eQoC/1aviftiNyfqWtuIDQ5ctSBt8 -2fyvDwu/tBR5VyKu7CLnjZ/ffjNT8WDfbO704XeBBId0+L8i8J7ddYlRhZufdjEw -hKx2Su8PZ9RnJYShTBOpD0xdveh16eb7dpCZiPnp1/MOCyIyo1Iwu570VoMde9SW -sPFLdUMiCXw+A4Gp/e9Am+D/98PiL4JChKsiowbzpDfMrVQH4Sblpcgn/Pp+u1be -2Kl/7wqr3TA+w/unLnBnB859v3wDhSW4hhKASoFwyX3VfJ43AkmWFUBX/bpDvHto -rFw+MvbSLsS3QD5KlZmega1pNZtin5KV8H/oJI/CjEc9HHwd27alW9VkUu0WrH0j -c98wLHB/9xXLjunabxSmd+wv25SgYNqpsRNOLgcJraJbaRh4XkbDyuvjF2bRJVP4 -pIjntxQHS/oDFFFK3wc7fp/rTAl0PJ7tytYj4urg45N3ts7unwnB8WmKzD9Avcwe -8Kst12cEibS8X2sg8wOqgB0yarC17mBEqONK7Fw4VH+VzZYw0KGF5DWjeSXj/XsD -AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEvTMHe27npmyJUBxQeHcNFniMJUWZf0 -i9EGd+XlF+m/l3rh1/mCecV7s32QTZEiFHv4UJPYASbgtx7+mEZuq7dVsxIUICWs -gyRkwvKjMqK2tR5IRkquhK5PuDS0QC3M/ZsDwnTgaezFrplFYf80z1kAAkm/c7eh -ZEjI6+1vuaS+HX1w2unk42PiAEB6oKFi3b8xl4TC6acYfMYiC3cOa/d3ZKHhqXhT -wM0VtDe0Qn1kExe+19XJG5cROelxmMXBm1+/c2KUw1yK8up6kJlEsmd8JLw/wMUp -xcJUKIH1qGBlRlFTYbVell+dB7IkHhadrnw27Z47uHobB/lzN69r63c= ------END CERTIFICATE----- -``` - -```bash -$ cat client.pem ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAz/LzSmnPAZhLvOX4Dp/HheSPCNr5lQr8CgENrNLdORFbW0MV -nvWSgt3rSorZbV31aHT8XeI1NyhtV5+vTaYRwu2X53jNS6D5btGwr/wlIIwwgNUN -jOVNZ13tEnNXVz/Q0VnJNGQRVKMafp5CgL/Vq+J+2I3J+pa24gNDly1IG3zZ/K8P -C7+0FHlXIq7sIueNn99+M1PxYN9s7vThd4EEh3T4vyLwnt11iVGFm592MTCErHZK -7w9n1GclhKFME6kPTF296HXp5vt2kJmI+enX8w4LIjKjUjC7nvRWgx171Jaw8Ut1 -QyIJfD4Dgan970Cb4P/3w+IvgkKEqyKjBvOkN8ytVAfhJuWlyCf8+n67Vt7YqX/v -CqvdMD7D+6cucGcHzn2/fAOFJbiGEoBKgXDJfdV8njcCSZYVQFf9ukO8e2isXD4y -9tIuxLdAPkqVmZ6BrWk1m2KfkpXwf+gkj8KMRz0cfB3btqVb1WRS7RasfSNz3zAs -cH/3FcuO6dpvFKZ37C/blKBg2qmxE04uBwmtoltpGHheRsPK6+MXZtElU/ikiOe3 -FAdL+gMUUUrfBzt+n+tMCXQ8nu3K1iPi6uDjk3e2zu6fCcHxaYrMP0C9zB7wqy3X -ZwSJtLxfayDzA6qAHTJqsLXuYESo40rsXDhUf5XNljDQoYXkNaN5JeP9ewMCAwEA -AQKCAgAt5cC/HuV6w6OL2PJMQAXozo6ndLV7qQYCK0Nabtw3GVahqQffovIoglTJ -iiX9Vqyw1olRK3l1aC3iFjO6Hrpy3MAXbflaBPio9g1aenuzWF3oQZ4RCBdyhi+q -T9zqPAKaAog/UQrmNG3HnqStCCpgGsjGgV0gOx24euHzPyJYNtFiTT0z6acUkcei -txsVhSgkLk8Lgy6WpBnGEDSnjMl0IGQ6w7p6RgUIPv8PXz3WE5BlBGL7qtnO7slA -Id3JxRnEUDh3U3dB7SS5O7oY9v0b/3CDpsuXe3wd1G432E0Zmf0J9Q63t46CZqmd -d+i9YvRE0BpemNDFnmrr3uQ+x43qARtonEELirY99aW0hUUfD7PieLNnZP7tueVB -J80GUU5ckJhn9u6SlKZtvBU2mAWvaKZEv1+9vDh4Le8fNtubpC5YtSKztc66csL6 -DLtyi81iftpF2YtDVKK8UB35KyG/0IWkXyfquOkYuL8RwrJR9tNL1+Zs4GqgC5sH -fVIwR6/+w/kpeH9nP8/0VaXRoqCjKQpVjFg9f645rZQ/OzcnQNv6k8Sr+4zHaHog -uFwOo7p4QfIPIBfU8+8RD36C5U/p5PiouR8sN+rfDCu0N07XKmHAphlqvjTR+OG/ -J5o3jGgAerMZn3gmiGUS+IdmrPw7we8gc8j8E8C6TjvlALQNOQKCAQEA6ySvPyMw -hiqfa9TeYce1gI2HtRyiCM1r7otFmTqS/I53he7b9LAZ5+gqXxMS/PW9QTvKET2q -vRU+xZYD4h/i9R+qZT3s7EsNBXBQHkvh0m0qNRtrsSgAYCWLsI/0nUOKIz6obHu5 -5SxS8y3y1t9SoXvWpzTpAnuk91BVMtSephf/4/hXlH2d1WnOC0SqS979aRrm8NE/ -rdT5qchhySyfZkYbADxy5AHHqoFTtkxGnLvcbY0X/oJI3zNYCFKTFNmb6/61cxuB -REjwREUFOhneXYb9mBG4bxuteCz65MyshiN1EAsVMnI6aEuHR6EAvt1Jslv7Qi1a -2UKM61XcL8m/lQKCAQEA4mTGaoZJ1yz+TCKMuae33Y9assXOYAQpdb3MY2UTgzQg -JAZYmwaAsBaC1e49G0eYVAP+eDI4u0OR0f0CW9Pf+OdGRWuZfVum0d+PmcIhJfgM 
-jXsR4CJpPcX7VZLHMZ77QFDh/xBHNXR8F1latPXFYR3ytcXxl4HEtodDpS84AgiO -57yPitY78MS16l3GJGWlgDdRP/LvVixugH2steHCtk8l932/qayUeezbYSEhyQ6L -13f0qRaBhvRsoULj3HvQWNPxmGYK3p+N+zXc1CErF6x8sDq4jeXyNg+26gZknea8 -3SEKKx+Wf4vT3rlUEeYy0uFubG06qYCdtj2ZuSOKNwKCAQEAgJpQqkRRrp8zD6Od -eHbkIonFVd1yFURcKlvLVdF+QFiesAaCD+IcWQRV4Cerc+PmfP35NtK2RbGP4jp4 -pzxvQUbvR23F3Tnyxi2188vmltKTifYUQRCym+MM8iTZUQV2UG5daO+GLPu/5jYU -IUaEh8MWE97RLUV4ZLZv0lwM5KQtlH3nUFQfdW/ne6wzQ0mS6OAIvF6E6EqZvSzV -plQcXbAr5kYpQ+BhVjRjF0nCOMhZ9yR6ofyZZFFNbUfUH0wghcKJdInveew2U/A3 -up4ZSqegnIHckA/gIODg2y/Bj59mz75v+mYU4aOlOhypLroSK1U5JultTXOjZLZR -tWUuvQKCAQAVcti9hOWABlo9WlSczkAENK2yHD12KU7IQegYTp4vowOchiFk5pPQ -mwFgRUbyy7Cp3QsB1jg7vaYWD/NmQceJbFfjAdOz5bgDUDvppFPBpiOCT/OcmYYA -/T3XmKVYlShWqpMOuDsW3GdZSvTmChbeIZk6EXvXD8tUQ7Jr9vJGdwsa92leDPf2 -0pwtjR7Vme+5GwSOm3SDZIg/kiiHvtDUtuDw9q/u4lPazU7nf90UkFU9X7cFQgWZ -hJS6Hn06CVzu3X2ZI6nJ97Ha5/p4+n97qbLSe226u9tbtddtipeDwjWIebXd6gs3 -IEc9Za+KVpXgFs2AZkTVhELs3h8vRCe3AoIBAQDRr0k5OePCsDbs6RadGI9Ta+pf -I30u8imKw8Rih++127UPjpc8OCzaQNvWnpdAoJTgo12fQJqGigRUfJMFFQn7u3jz -ggAq9WLRsXRZpEXk8NXDr/WhksOoWmkxLf4uNO7l2AytIFqZbb1pmTd0g+np2yBE -8VgDR45IxbGPQLsTzKXeXJuXOi7ut2ehJ+VgsS84BsRTeO4v+Y2qpGcyw6fXtU3E -NDrWe/C5QceILtDcd+JiXUgKrHRK+qrfawoxPBDVhYJ+N/Y7SqvZ2GvxibnRs8YA -cbhEebkfUHRQSEqkPr+ndRHInwWTMAWF4IhSuQOpTvT7PY7UNet2io8W8Py6 ------END RSA PRIVATE KEY----- -``` - -Now, use the key and certificate to authenticate against Kong and use the -service: - -```bash -$ curl --key client.key --cert client.crt https://$PROXY_IP/foo -k -I -HTTP/2 200 -content-type: text/plain; charset=UTF-8 -date: Mon, 11 May 2020 18:27:22 GMT -server: echoserver -x-kong-upstream-latency: 1 -x-kong-proxy-latency: 1 -via: kong/2.0.4.0-enterprise-k8s -``` - -## Conclusion - -This guide demonstrates how to implement client TLS authentication -using Kong. -You are free to use other features that mtls-auth plugin in Kong to -achieve more complicated use-cases. diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-oidc-plugin.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-oidc-plugin.md deleted file mode 100644 index 701241331423..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-oidc-plugin.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Using OIDC plugin ---- - -{{site.ee_product_name}}'s OIDC plugin can authenticate requests using OpenID Connect protocol. -This guide shows a basic example of how to setup the OIDC plugin using -the Ingress Controller. - -> Note: This works only with Enterprise version of Kong. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) documentation -to install enterprise version of the {{site.kic_product_name}}. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: 192.0.2.8.xip.io - http: - paths: - - path: / - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -We are using `192.0.2.8.xip.io` as our host, you can use any domain name -of your choice. A domain name is a prerequisite for this guide. -For demo purpose, we are using [xip.io](http://xip.io) -service to avoid setting up a DNS record. - -Test the Ingress rule: - -```bash -$ curl -i 192.0.2.8.xip.io/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Next, open a browser and browse to `http://192.0.2.8.xip.io`. -You should see landing page same as httpbin.org. - -## Setup OIDC plugin - -Now we are going to protect our dummy service with OpenID Connect -protocol using Google as our identity provider. - -First, set up an OAuth 2.0 application in -[Google](https://developers.google.com/identity/protocols/oauth2/openid-connect). - -Once you have setup your application in Google, use the client ID and client -secret and create a KongPlugin resource in Kubernetes: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: oidc-auth -config: - issuer: https://accounts.google.com/.well-known/openid-configuration - client_id: - - - client_secret: - - - redirect_uri: - - http://192.0.2.8.xip.io -plugin: openid-connect -" | kubectl apply -f - -kongplugin.configuration.konghq.com/global-rate-limit created -``` - -The `redirect_uri` parameter must be a URI that matches the Ingress rule we -created earlier. You must also [add it to your Google OIDC -configuration](https://developers.google.com/identity/protocols/oauth2/openid-connect#setredirecturi) - -Next, enable the plugin on our Ingress: - -```bash -$ kubectl patch ing demo -p '{"metadata":{"annotations":{"konghq.com/plugins":"oidc-auth"}}}' -ingress.extensions/demo patched -``` -## Test - -Now, if you visit the host you have set up in your Ingress resource, -Kong should redirect you to Google to verify your identity. -Once you identify yourself, you should be able to browse our dummy service -once again. - -This basic configuration permits any user with a valid Google account to access -the dummy service. -For setting up more complicated authentication and authorization flows, -please read -[plugin docs](/gateway/latest/configure/auth/oidc-google/). 
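To confirm from the command line that the plugin is intercepting traffic, an unauthenticated request should no longer return the httpbin `200` seen earlier but a redirect towards the identity provider. The exact status code and headers depend on your plugin configuration, so treat this as a rough check:

```bash
# expect a redirect (for example a 302 with a Location header pointing at
# accounts.google.com) rather than the httpbin response
curl -sI http://192.0.2.8.xip.io/status/200 | head -n 5
```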
diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-rewrites.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-rewrites.md deleted file mode 100644 index cdba8d4bb91f..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-rewrites.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Rewriting hosts and paths ---- -This guide demonstrates host and path rewrites using Ingress and Service configuration. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Create a test Deployment - -To test our requests, we create an echo server Deployment, which responds to -HTTP requests with a summary of the request contents: - -```bash -$ kubectl create namespace echo -namespace/echo created -$ kubectl apply -n echo -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -After completing the examples in the guide, you can clean up the example -configuration with `kubectl delete namespace echo`. - -For your actual production configuration, replace `echo` with whatever -namespace you use to run your application. - -## Create a Kubernetes service - -First, create a Kubernetes Service: - -```bash -echo " -apiVersion: v1 -kind: Service -metadata: - name: echo - namespace: echo -spec: - selector: - app: echo - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 80 -" | kubectl apply -f - -``` - -When referenced by an Ingress, this Service will create a Kong service and -upstream that uses the upstream IPs (Pod IPs) for its `Host` header and appends -request paths starting at `/`. - -## Create an Ingress to expose the service at the path `/myapp` on `example.com` - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: my-app - namespace: echo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: myapp.example.com - http: - paths: - - path: /myapp - backend: - serviceName: echo - servicePort: 80 -' | kubectl create -f - -``` - -This Ingress will create a Kong route attached to the service we created above. -It will preserve its path but honor the service's hostname, so this request: - -```bash -$ curl -svX GET http://myapp.example.com/myapp/foo --resolve myapp.example.com:80:$PROXY_IP -GET /myapp/foo HTTP/1.1 -Host: myapp.example.com -User-Agent: curl/7.70.0 -Accept: */* -``` -will appear upstream as: - -``` -GET /myapp/foo HTTP/1.1 -Host: 10.16.4.8 -User-Agent: curl/7.70.0 -Accept: */* -``` - -We'll use this same cURL command in other examples as well. - -Actual output from cURL and the echo server will be more verbose. These -examples are condensed to focus primarily on the path and Host header. 
- -Note that this default behavior uses `strip_path=false` on the route. This -differs from Kong's standard default to conform with expected ingress -controller behavior. - -## Rewriting the host - -There are two options to override the default `Host` header behavior: - -- Add the [`konghq.com/host-header` annotation][1] to your Service, which sets - the `Host` header directly: - ```bash - $ kubectl patch -n echo service echo -p '{"metadata":{"annotations":{"konghq.com/host-header":"internal.myapp.example.com"}}}' - ``` - The request upstream will now use the header from that annotation: - ``` - GET /myapp/foo HTTP/1.1 - Host: internal.myapp.example.com - User-Agent: curl/7.70.0 - Accept: */* - ``` -- Add the [`konghq.com/preserve-host` annotation][0] to your Ingress, which - sends the route/Ingress hostname: - ```bash - $ kubectl patch -n echo ingress my-app -p '{"metadata":{"annotations":{"konghq.com/preserve-host":"true"}}}' - ``` - The request upstream will now include the hostname from the Ingress rule: - ``` - GET /myapp/foo HTTP/1.1 - Host: myapp.example.com - User-Agent: curl/7.70.0 - Accept: */* - ``` - -The `preserve-host` annotation takes precedence, so if you add both annotations -above, the upstream host header will be `myapp.example.com`. - -## Rewriting the path - -There are two options to rewrite the default path handling behavior: - -- Add the [`konghq.com/strip-path` annotation][2] to your Ingress, which strips - the path component of the route/Ingress, leaving the remainder of the path at - the root: - ```bash - $ kubectl patch -n echo ingress my-app -p '{"metadata":{"annotations":{"konghq.com/strip-path":"true"}}}' - ``` - The request upstream will now only contain the path components not in the - Ingress rule: - ``` - GET /foo HTTP/1.1 - Host: 10.16.4.8 - User-Agent: curl/7.70.0 - Accept: */* - ``` -- Add the [`konghq.com/path` annotation][3] to your Service, which prepends - that value to the upstream path: - ```bash - $ kubectl patch -n echo service echo -p '{"metadata":{"annotations":{"konghq.com/path":"/api"}}}' - ``` - The request upstream will now contain a leading `/api`: - ``` - GET /api/myapp/foo HTTP/1.1 - Host: 10.16.4.8 - User-Agent: curl/7.70.0 - Accept: */* - ``` -`strip-path` and `path` can be combined together, with the `path` component -coming first. Adding both annotations above will send requests for `/api/foo`. - -[0]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcompreserve-host -[1]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcomhost-header -[2]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcomstrip-path -[3]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcompath diff --git a/app/kubernetes-ingress-controller/1.2.x/guides/using-tcpingress.md b/app/kubernetes-ingress-controller/1.2.x/guides/using-tcpingress.md deleted file mode 100644 index dd759e971f62..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/guides/using-tcpingress.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: TCPIngress with Kong ---- - -This guide walks through using TCPIngress Custom Resource -resource to expose TCP-based services running in Kubernetes to the out -side world. - -## Overview - -TCP-based Ingress means that Kong simply forwards the TCP stream to a Pod -of a Service that's running inside Kubernetes. Kong will not perform any -sort of transformations. 
- -There are two modes avaialble: -- **Port based routing**: In this mode, Kong simply proxies all traffic it - receives on a specific port to the Kubernetes Service. TCP connections are - load balanced across all the available pods of the Service. -- **SNI based routing**: In this mode, Kong accepts a TLS-encrypted stream - at the specified port and can route traffic to different services based on - the `SNI` present in the TLS handshake. Kong will also terminate the TLS - handshake and forward the TCP stream to the Kubernetes Service. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -> **Note**: This feature works with Kong versions 2.0.4 and above. - -> **Note**: This feature is available in Controller versions 0.8 and above. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Configure Kong for new ports - -First, we will configure Kong's Deployment and Service to expose two new ports -9000 and 9443. Port 9443 expects a TLS connection from the client. - -```shell -$ kubectl patch deploy -n kong ingress-kong --patch '{ - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "proxy", - "env": [ - { - "name": "KONG_STREAM_LISTEN", - "value": "0.0.0.0:9000, 0.0.0.0:9443 ssl" - } - ], - "ports": [ - { - "containerPort": 9000, - "name": "stream9000", - "protocol": "TCP" - }, - { - "containerPort": 9443, - "name": "stream9443", - "protocol": "TCP" - } - ] - } - ] - } - } - } -}' -deployment.extensions/ingress-kong patched -``` - -```shell -$ kubectl patch service -n kong kong-proxy --patch '{ - "spec": { - "ports": [ - { - "name": "stream9000", - "port": 9000, - "protocol": "TCP", - "targetPort": 9000 - }, - { - "name": "stream9443", - "port": 9443, - "protocol": "TCP", - "targetPort": 9443 - } - ] - } -}' -service/kong-proxy patched -``` - -You are free to choose other ports as well. - -## Install TCP echo service - -Next, we will install a dummy TCP service. -If you already have a TCP-based service running in your cluster, -you can use that as well. - -```shell -$ kubectl apply -f https://bit.ly/tcp-echo -deployment.apps/tcp-echo created -service/tcp-echo created -``` - -Now, we have a TCP echo service running in Kubernetes. -We will now expose this on plain-text and a TLS based port. 
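Before moving on, you can verify that both stream ports were added to the `kong-proxy` Service; an optional check using kubectl's jsonpath output:

```bash
# the list should now include stream9000 and stream9443 alongside the HTTP ports
kubectl get service kong-proxy -n kong \
  -o jsonpath='{range .spec.ports[*]}{.name}{"\t"}{.port}{"\n"}{end}'
```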
- -## TCP port based routing - -To expose our service to the outside world, create the following -`TCPIngress` resource: - -```shell -$ echo "apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: echo-plaintext - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - port: 9000 - backend: - serviceName: tcp-echo - servicePort: 2701 -" | kubectl apply -f - -tcpingress.configuration.konghq.com/echo-plaintext created -``` - -Here we are instructing Kong to forward all traffic it receives on port -9000 to `tcp-echo` service on port 2701. - -Once created, we can see the IP address at which this is available: - -```shell -$ kubectl get tcpingress -NAME ADDRESS AGE -echo-plaintext 3m18s -``` - -Lets connect to this service using `telnet`: - -```shell -$ telnet $PROXY_IP 9000 -Trying 35.247.39.83... -Connected to 35.247.39.83. -Escape character is '^]'. -Welcome, you are connected to node gke-harry-k8s-dev-pool-1-e9ebab5e-c4gw. -Running on Pod tcp-echo-844545646c-gvmkd. -In namespace default. -With IP address 10.60.1.17. -This text will be echoed back. -This text will be echoed back. -^] -telnet> Connection closed. -``` - -We can see here that the `tcp-echo` service is now available outside the -Kubernetes cluster via Kong. - -## TLS SNI based routing - -Next, we will demonstrate how Kong can help expose the `tcp-echo` service -in a secure manner to the outside world. - -Create the following TCPIngress resource: - -``` -$ echo "apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: echo-tls - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - port: 9443 - backend: - serviceName: tcp-echo - servicePort: 2701 -" | kubectl apply -f - -tcpingress.configuration.konghq.com/echo-tls created -``` - -Now, we can access the `tcp-echo` service on port 9443, on SNI `example.com`. - -You should setup a DNS record for a Domain that you control -to point to PROXY_IP and then access -the service via that for production usage. - -In our contrived demo example, we can connect to the service via TLS -using `openssl`'s `s_client` command: - -```shell -$ openssl s_client -connect $PROXY_IP:9443 -servername example.com -quiet -openssl s_client -connect 35.247.39.83:9443 -servername foo.com -quiet -depth=0 C = US, ST = California, L = San Francisco, O = Kong, OU = IT Department, CN = localhost -verify error:num=18:self signed certificate -verify return:1 -depth=0 C = US, ST = California, L = San Francisco, O = Kong, OU = IT Department, CN = localhost -verify return:1 -Welcome, you are connected to node gke-harry-k8s-dev-pool-1-e9ebab5e-c4gw. -Running on Pod tcp-echo-844545646c-gvmkd. -In namespace default. -With IP address 10.60.1.17. -This text will be echoed back. -This text will be echoed back. -^C -``` - -Since Kong is not configured with a TLS cert-key pair for `example.com`, Kong -is returning a self-signed default certificate, which is not trusted. -You can also see that the echo service is running as expected. - -## Bonus - -Scale the `tcp-echo` Deployment to have multiple replicas and observe how -Kong load-balances the TCP-connections between pods. - -## Conclusion - -In this guide, we see how to use Kong's TCP routing capabilities using -TCPIngress Custom Resource. This can be very useful if you have services -running inside Kubernetes that have custom protocols instead of the more -popular HTTP or gRPC protocols. 
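One way to try the bonus exercise above, assuming a BSD-style `nc` is available locally (the pod name in the greeting banner is what should vary between connections):

```bash
# run the echo service with several replicas
kubectl scale deployment tcp-echo --replicas=3

# open a few short-lived TCP connections; the "Running on Pod ..." banner
# should rotate across the replicas
for i in 1 2 3 4; do
  nc -w 1 $PROXY_IP 9000 </dev/null | grep "Running on Pod"
done
```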
diff --git a/app/kubernetes-ingress-controller/1.2.x/index.md b/app/kubernetes-ingress-controller/1.2.x/index.md deleted file mode 100644 index 7397d297fa16..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/index.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Kong Ingress Controller -subtitle: An ingress controller for the {{site.base_gateway}} ---- - -## Concepts - -### Architecture - -The [design][design] document explains how the {{site.kic_product_name}} works -inside a Kubernetes cluster and configures Kong to proxy traffic as per -rules defined in the Ingress resources. - -### Custom Resources - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, the `KongIngress` Custom resource is used as an -"extension" to the existing Ingress API. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -Please refer to [custom resources][crd] concept document for more details. - -### Deployment Methods - -The {{site.kic_product_name}} can be deployed in a variety of deployment patterns. -Please refer to the [deployment](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/) documentation, -which explains all the components -involved and different ways of deploying them based on the use-case. - -### High-availability and Scaling - -The {{site.kic_product_name}} is designed to scale with your traffic -and infrastructure. -Please refer to [this document](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/) to understand -failures scenarios, recovery methods, as well as scaling considerations. - -### Ingress classes - -[Ingress classes](/kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes) filter which resources the -controller loads. They ensure that {{site.kic_product_name}} instances do not -load configuration intended for other instances or other ingress controllers. - -### Security - -Please refer to [this document](/kubernetes-ingress-controller/{{page.release}}/concepts/security/) to understand the -default security settings and how to further secure the Ingress Controller. - -## Guides and Tutorials - -Please browse through [guides][guides] to get started or understand how to configure -a specific setting with the {{site.kic_product_name}}. - -## Configuration Reference - -The configurations in the {{site.kic_product_name}} can be tweaked using -Custom Resources and annotations. -Please refer to the following documents detailing this process: - -- [Custom Resource Definitions](/kubernetes-ingress-controller/{{page.release}}/references/custom-resources/) -- [Annotations](/kubernetes-ingress-controller/{{page.release}}/references/annotations/) -- [CLI arguments](/kubernetes-ingress-controller/{{page.release}}/references/cli-arguments/) -- [Version compatibility matrix](/kubernetes-ingress-controller/{{page.release}}/references/version-compatibility/) -- [Plugin compatibility matrix](/kubernetes-ingress-controller/{{page.release}}/references/plugin-compatibility/) - -## FAQs - -[FAQs][faqs] will help find answers to common problems quickly. -Please feel free to open Pull Requests to contribute to the list. 
- -## Troubleshooting - -Please read through our [deployment guide][deployment] for a detailed -understanding of how Ingress Controller is designed and deployed -along alongside Kong. - -- [FAQs][faqs] might help as well. -- [Troubleshooting][troubleshooting] guide can help - resolve some issues. - Please contribute back if you feel your experience can help - the larger community. - -[annotations]: /kubernetes-ingress-controller/{{page.release}}/references/annotations -[crd]: /kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources -[deployment]: /kubernetes-ingress-controller/{{page.release}}/deployment/overview -[design]: /kubernetes-ingress-controller/{{page.release}}/concepts/design -[faqs]: /kubernetes-ingress-controller/{{page.release}}/faq -[troubleshooting]: /kubernetes-ingress-controller/{{page.release}}/troubleshooting -[guides]: /kubernetes-ingress-controller/{{page.release}}/guides/overview diff --git a/app/kubernetes-ingress-controller/1.2.x/references/annotations.md b/app/kubernetes-ingress-controller/1.2.x/references/annotations.md deleted file mode 100644 index b4dae4e7caca..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/references/annotations.md +++ /dev/null @@ -1,473 +0,0 @@ ---- -title: Kong Ingress Controller annotations ---- - -The {{site.kic_product_name}} supports the following annotations on various -resources: - -## Ingress resource - -Following annotations are supported on Ingress resources: - -| Annotation name | Description | -|-----------------|-------------| -| REQUIRED [`kubernetes.io/ingress.class`](#kubernetesioingressclass) | Restrict the Ingress rules that Kong should satisfy | -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for specific Ingress. | -| [`konghq.com/protocols`](#konghqcomprotocols) | Set protocols to handle for each Ingress resource. | -| [`konghq.com/preserve-host`](#konghqcompreserve-host) | Pass the `host` header as is to the upstream service. | -| [`konghq.com/strip-path`](#konghqcomstrip-path) | Strip the path defined in Ingress resource and then forward the request to the upstream service. | -| [`konghq.com/https-redirect-status-code`](#konghqcomhttps-redirect-status-code) | Set the HTTPS redirect status code to use when an HTTP request is received. | -| [`konghq.com/regex-priority`](#konghqcomregex-priority) | Set the route's regex priority. | -| [`konghq.com/methods`](#konghqcommethods) | Set methods matched by this Ingress. | -| [`konghq.com/snis`](#konghqcomsnis) | Set SNI criteria for routes created from this Ingress. | -| [`konghq.com/request-buffering`](#konghqcomrequest-buffering) | Set request buffering on routes created from this Ingress. | -| [`konghq.com/response-buffering`](#konghqcomresponse-buffering) | Set response buffering on routes created from this Ingress. | -| [`konghq.com/override`](#konghqcomoverride) | Control other routing attributes via `KongIngress` resource. | - -`kubernetes.io/ingress.class` is normally required, and its value should match -the value of the `--ingress-class` controller argument ("kong" by default). - -Setting the `--process-classless-ingress-v1beta1` controller flag removes that requirement: -when enabled, the controller will process Ingresses with no -`kubernetes.io/ingress.class` annotation. Recommended best practice is to set -the annotation and leave this flag disabled; the flag is intended for -older configurations, as controller versions prior to 0.10 processed classless -Ingress resources by default. 
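As an illustration of how several of the annotations above combine on a single Ingress (the host, path, and backend below are placeholders), the following sketch serves `example.com` over HTTPS only, redirects HTTP with a 301, and strips the matched path before proxying:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: demo-annotations
  annotations:
    kubernetes.io/ingress.class: kong
    konghq.com/protocols: "https"
    konghq.com/https-redirect-status-code: "301"
    konghq.com/strip-path: "true"
spec:
  rules:
  - host: example.com
    http:
      paths:
      - path: /api
        backend:
          serviceName: echo
          servicePort: 80
```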
- -## Service resource - -Following annotations are supported on Service resources: - -| Annotation name | Description | -|-----------------|-------------| -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific Service | -| [`konghq.com/protocol`](#konghqcomprotocol) | Set protocol Kong should use to talk to a Kubernetes service | -| [`konghq.com/path`](#konghqcompath) | HTTP Path that is always prepended to each request that is forwarded to a Kubernetes service | -| [`konghq.com/client-cert`](#konghqcomclient-cert) | Client certificate and key pair Kong should use to authenticate itself to a specific Kubernetes service | -| [`konghq.com/host-header`](#konghqcomhost-header) | Set the value sent in the `Host` header when proxying requests upstream | -| [`konghq.com/override`](#konghqcomoverride) | Fine grained routing and load-balancing | -| [`ingress.kubernetes.io/service-upstream`](#ingresskubernetesioservice-upstream) | Offload load-balancing to kube-proxy or sidecar | - -## KongConsumer resource - -Following annotations are supported on KongConsumer resources: - -| Annotation name | Description | -|-----------------|-------------| -| REQUIRED [`kubernetes.io/ingress.class`](#kubernetesioingressclass) | Restrict the KongConsumers that a controller should satisfy | -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific consumer | - -`kubernetes.io/ingress.class` is normally required, and its value should match -the value of the `--ingress-class` controller argument ("kong" by default). - -Setting the `--process-classless-kong-consumer` controller flag removes that requirement: -when enabled, the controller will process KongConsumers with no -`kubernetes.io/ingress.class` annotation. Recommended best practice is to set -the annotation and leave this flag disabled; the flag is primarily intended for -older configurations, as controller versions prior to 0.10 processed classless -KongConsumer resources by default. - -## Annotations - -### kubernetes.io/ingress.class - -If you have multiple Ingress controllers in a single cluster, -you can pick one by specifying the `ingress.class` annotation. -Following is an example of -creating an Ingress with an annotation: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: test-1 - annotations: - kubernetes.io/ingress.class: "gce" -spec: - rules: - - host: example.com - http: - paths: - - path: /test1 - backend: - serviceName: echo - servicePort: 80 -``` - -This will target the GCE controller, forcing the {{site.kic_product_name}} to -ignore it. - -On the other hand, an annotation such as - -```yaml -metadata: - name: test-1 - annotations: - kubernetes.io/ingress.class: "kong" -``` - -will target the {{site.kic_product_name}}, forcing the GCE controller -to ignore it. - -The following resources _require_ this annotation by default: - -- Ingress -- KongConsumer -- TCPIngress -- KongClusterPlugin -- Secret resources with the `ca-cert` label - -You can optionally allow Ingress or KongConsumer resources with no class -annotation (by setting the `--process-classless-ingress-v1beta1` or -`--process-classless-kong-consumer` flags, respectively), though recommended -best practice is to leave these flags disabled: the flags are primarily -intended for compatibility with configuration created before this requirement -was introduced in controller 0.10. 
-
-If you allow classless resources, you must take care when using multiple
-controller instances in a single cluster: only one controller instance should
-enable these flags to avoid different controller instances fighting over
-classless resources, which would result in unexpected and unpredictable behavior.
-
-The ingress class used by the {{site.kic_product_name}} to filter Ingress
-resources can be changed using the `CONTROLLER_INGRESS_CLASS`
-environment variable.
-
-```yaml
-spec:
-  template:
-    spec:
-      containers:
-      - name: kong-ingress-internal-controller
-        env:
-        - name: CONTROLLER_INGRESS_CLASS
-          value: kong-internal
-```
-
-#### Multiple unrelated {{site.kic_product_name}}s {#multiple-unrelated-controllers}
-
-In some deployments, one might use multiple {{site.kic_product_name}}s
-in the same Kubernetes cluster
-(for example, one serving public traffic and one serving "internal" traffic).
-For such deployments, please ensure that in addition to a different
-`ingress-class`, the `--election-id` is also different.
-
-In such deployments, the `kubernetes.io/ingress.class` annotation can be used on the
-following custom resources as well:
-
-- KongPlugin: To configure (global) plugins only in one of the Kong clusters.
-- KongConsumer: To create different consumers in different Kong clusters.
-- KongCredential: To create associated credentials for consumers.
-
-### konghq.com/plugins
-
-> Available since controller 0.8
-
-Kong's power comes from its plugin architecture, where plugins can modify
-the request and response or impose certain policies on requests as they
-are proxied to your service.
-
-With the {{site.kic_product_name}}, plugins can be configured by creating
-`KongPlugin` Custom Resources and then associating them with an Ingress, Service,
-KongConsumer, or a combination of those.
-
-The following is an example of how to use the annotation:
-
-```yaml
-konghq.com/plugins: high-rate-limit, docs-site-cors
-```
-
-Here, `high-rate-limit` and `docs-site-cors`
-are the names of the KongPlugin resources that
-should be applied to the Ingress rules defined in the
-Ingress resource on which the annotation is applied.
-
-This annotation can also be applied to a Service resource in Kubernetes, which
-will result in the plugin being executed at Service-level in Kong,
-meaning the plugin will be
-executed for every request that is proxied, no matter which Route it came from.
-
-This annotation can also be applied to a KongConsumer resource,
-which results in the plugin being executed whenever the specific consumer
-accesses any of the defined APIs.
-
-Finally, this annotation can also be applied to a combination of the
-following resources:
-- **Ingress and KongConsumer**
-  If an Ingress resource and a KongConsumer resource share a plugin in the
-  `konghq.com/plugins` annotation, then the plugin will be created for the
-  combination of those two resources in Kong.
-- **Service and KongConsumer**
-  As in the previous case, if you would like to give a specific consumer or
-  client of your service special treatment, you can do so by applying
-  the same annotation to both resources.
-
-Please follow the
-[Using the KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource)
-guide for details on how this annotation can be used.
-
-
-### konghq.com/path
-
-> Available since controller 0.8
-
-This annotation can be used on a Service resource only.
-This annotation can be used to prepend an HTTP path of a request, -before the request is forwarded. - -For example, if the annotation `konghq.com/path: "/baz"` is applied to a -Kubernetes Service `billings`, then any request that is routed to the -`billings` service will be prepended with `/baz` HTTP path. If the -request contains `/foo/something` as the path, then the service will -receive an HTTP request with path set as `/baz/foo/something`. - -### konghq.com/strip-path - -> Available since controller 0.8 - -This annotation can be applied to an Ingress resource and can take two values: -- `"true"`: If set to true, the part of the path specified in the Ingress rule - will be stripped out before the request is sent to the service. - For example, if the Ingress rule has a path of `/foo` and the HTTP request - that matches the Ingress rule has the path `/foo/bar/something`, then - the request sent to the Kubernetes service will have the path - `/bar/something`. -- `"false"`: If set to false, no path manipulation is performed. - -All other values are ignored. -Please note the quotes (`"`) around the boolean value. - -Sample usage: - -```yaml -konghq.com/strip-path: "true" -``` - -### konghq.com/preserve-host - -> Available since controller 0.8 - -This annotation can be applied to an Ingress resource and can take two values: -- `"true"`: If set to true, the `host` header of the request will be sent - as is to the Service in Kubernetes. -- `"false"`: If set to false, the `host` header of the request is not preserved. - -Please note the quotes (`"`) around the boolean value. - -Sample usage: - -```yaml -konghq.com/preserve-host: "true" -``` - -### konghq.com/https-redirect-status-code - -> Available since controller 0.8 - -By default, Kong sends HTTP Status Code 426 for requests -that need to be redirected to HTTPS. -This can be changed using this annotations. -Acceptable values are: -- 301 -- 302 -- 307 -- 308 -- 426 - -Any other value will be ignored. - -Sample usage: - -```yaml -konghq.com/https-redirect-status-code: "301" -``` - -Please note the quotes (`"`) around the integer value. - -### konghq.com/regex-priority - -> Available since controller 0.9 - -Sets the `regex_priority` setting to this value on the Kong route associated -with the Ingress resource. This controls the [matching evaluation -order](/gateway/latest/reference/proxy/#evaluation-order) for regex-based -routes. It accepts any integer value. Routes are evaluated in order of highest -priority to lowest. - -Sample usage: - -```yaml -konghq.com/regex-priority: "10" -``` - -Please note the quotes (`"`) around the integer value. - -### konghq.com/methods - -> Available since controller 0.9 - -Sets the `methods` setting on the Kong route associated with the Ingress -resource. This controls which request methods will match the route. Any -uppercase alpha ASCII string is accepted, though most users will use only -[standard methods](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods). - -Sample usage: - -```yaml -konghq.com/methods: "GET,POST" -``` - -### konghq.com/snis - -> Available since controller 1.1 - -Sets the `snis` match criteria on the Kong route associated with this Ingress. -When using route-attached plugins that execute during the certificate -phase (for example, [Mutual TLS Authentication](/hub/kong-inc/mtls-auth/)), -the `snis` annotation allows route matching based on the server name -indication information sent in a client's TLS handshake. 
- -Sample usage: - -```yaml -konghq.com/snis: "foo.example.com, bar.example.com" -``` - -### konghq.com/request-buffering - -> Available since controller 1.2 - -Enables or disables request buffering on the Kong route associated with this -Ingress. - -Sample usage: - -```yaml -konghq.com/request-buffering: "false" -``` - -### konghq.com/response-buffering - -> Available since controller 1.2 - -Enables or disables response buffering on the Kong route associated with this -Ingress. - -Sample usage: - -```yaml -konghq.com/response-buffering: "false" -``` - -### konghq.com/override - -> Available since controller 0.8 - -This annotation can associate a KongIngress resource with -an Ingress or a Service resource. -It serves as a way to bridge the gap between a sparse Ingress API in Kubernetes -with fine-grained controlled using the properties of Service, Route -and Upstream entities in Kong. - -Please follow the -[Using the KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource) -guide for details on how to use this annotation. - -### konghq.com/protocol - -> Available since controller 0.8 - -This annotation can be set on a Kubernetes Service resource and indicates -the protocol that should be used by Kong to communicate with the service. -In other words, the protocol is used for communication between a -[Kong Service](/gateway/api/admin-ee/latest/#/Services/list-service/) and -a Kubernetes Service, internally in the Kubernetes cluster. - -Accepted values are: -- `http` -- `https` -- `grpc` -- `grpcs` -- `tcp` -- `tls` - -### konghq.com/protocols - -> Available since controller 0.8 - -This annotation sets the list of acceptable protocols for the all the rules -defined in the Ingress resource. -The protocols are used for communication between the -Kong and the external client/user of the Service. - -You usually want to set this annotation for the following two use-cases: -- You want to redirect HTTP traffic to HTTPS, in which case you will use - `konghq.com/protocols: "https"` -- You want to define gRPC routing, in which case you should use - `konghq.com/protocols: "grpc,grpcs"` - -### konghq.com/client-cert - -> Available since controller 0.8 - -This annotation sets the certificate and key-pair Kong should use to -authenticate itself against the upstream service, if the upstream service -is performing mutual-TLS (mTLS) authentication. - -The value of this annotation should be the name of the Kubernetes TLS Secret -resource which contains the TLS cert and key pair. - -Under the hood, the controller creates a Certificate in Kong and then -sets the -[`service.client_certificate`](/gateway/api/admin-ee/latest/#/Services/list-service/) -for the service. - -### konghq.com/host-header - -> Available since controller 0.9 - -Sets the `host_header` setting on the Kong upstream created to represent a -Kubernetes Service. By default, Kong upstreams set `Host` to the hostname or IP -address of an individual target (the Pod IP for controller-managed -configuration). This annotation overrides the default behavior and sends -the annotation value as the `Host` header value. - -If `konghq.com/preserve-host: true` is present on an Ingress (or -`route.preserve_host: true` is present in a linked KongIngress), it will take -precedence over this annotation, and requests to the application will use the -hostname in the Ingress rule. 
- -Sample usage: - -```yaml -konghq.com/host-header: "test.example.com" -``` - -### ingress.kubernetes.io/service-upstream - -By default, the {{site.kic_product_name}} distributes traffic amongst all the -Pods of a Kubernetes `Service` by forwarding the requests directly to -Pod IP addresses. One can choose the load-balancing strategy to use -by specifying a KongIngress resource. - -However, in some use-cases, the load-balancing should be left up -to `kube-proxy`, or a sidecar component in the case of Service Mesh deployments. - -Setting this annotation to a Service resource in Kubernetes will configure -the {{site.kic_product_name}} to directly forward -the traffic outbound for this Service -to the IP address of the service (usually the ClusterIP). - -`kube-proxy` can then decide how it wants to handle the request and route the -traffic accordingly. If a sidecar intercepts the traffic from the controller, -it can also route traffic as it sees fit in this case. - -Following is an example snippet you can use to configure this annotation -on a `Service` resource in Kubernetes, (please note the quotes around `true`): - -```yaml -annotations: - ingress.kubernetes.io/service-upstream: "true" -``` - -You need {{site.kic_product_name}} >= 0.6 for this annotation. diff --git a/app/kubernetes-ingress-controller/1.2.x/references/cli-arguments.md b/app/kubernetes-ingress-controller/1.2.x/references/cli-arguments.md deleted file mode 100644 index 04675924d95c..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/references/cli-arguments.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: CLI Arguments ---- - -Various settings and configurations of the controller can be tweaked -using CLI flags. - -## Environment variables - -Each flag defined in the table below can also be configured using -an environment variable. The name of the environment variable is `CONTROLLER_` -string followed by the name of flag in uppercase. - -For example, `--ingress-class` can be configured using the following -environment variable: - -``` -CONTROLLER_INGRESS_CLASS=kong-foobar -``` - -It is recommended that all the configuration is done via environment variables -and not CLI flags. - -## Flags - -Following table describes all the flags that are available: - -| Flag | Type | Default | Description | -|-------|------|---------|-------------| -| --admission-webhook-cert-file |`string` | `/admission-webhook/tls.crt` | Path to the PEM-encoded certificate file for TLS handshake.| -| --admission-webhook-key-file |`string` | `/admission-webhook/tls.key` | Path to the PEM-encoded private key file for TLS handshake.| -| --admission-webhook-cert |`string` | none | PEM-encoded certificate string for TLS handshake.| -| --admission-webhook-key |`string` | none | PEM-encoded private key string for TLS handshake.| -| --admission-webhook-listen |`string` | `off` | The address to start admission controller on (ip:port). Setting it to 'off' disables the admission controller.| -| --anonymous-reports |`string` | `true` | Send anonymized usage data to help improve Kong.| -| --apiserver-host |`string` | none | The address of the Kubernetes API server to connect to in the format of protocol://address:port, e.g., "http://localhost:8080. 
If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted.| -| --disable-ingress-extensionsv1beta1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `extensions/v1beta1`.| -| --disable-ingress-networkingv1beta1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `networking/v1beta1`.| -| --disable-ingress-networkingv1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `networking/v1`.| -| --dump-config |`string` | none | Dump generated configuration to a temporary directory when set to `enabled`. When set to `sensitive`, dumps include certificate+key pairs and credentials.| -| --election-id |`string` | `ingress-controller-leader` | The name of ConfigMap (in the same namespace) to use to facilitate leader-election between multiple instances of the controller.| -| --ingress-class |`string` | `kong` | Ingress class name to use to filter Ingress and custom resources when multiple Ingress Controllers are running in the same Kubernetes cluster.| -| --kong-admin-ca-cert-file |`string` | none | Path to PEM-encoded CA certificate file to verify Kong's Admin SSL certificate.| -| --kong-admin-ca-cert |`string` | none | PEM-encoded CA certificate string to verify Kong's Admin SSL certificate.| -| --kong-admin-concurrency |`int` | `10` | Max number of concurrent requests sent to Kong's Admin API.| -| --kong-admin-filter-tag |`string` | `managed-by-ingress-controller` | The tag used to manage entities in Kong.| -| --kong-admin-header |`string` | none | Add a header (key:value) to every Admin API call, this flag can be used multiple times to specify multiple headers.| -| --kong-admin-token |`string` | none | Set the {{site.ee_product_name}} RBAC token to be used by the controller.| -| --kong-admin-tls-server-name |`string` | none | SNI name to use to verify the certificate presented by Kong in TLS.| -| --kong-admin-tls-skip-verify |`boolean` | `false` | Disable verification of TLS certificate of Kong's Admin endpoint.| -| --kong-admin-url |`string` | `http://localhost:8001` | The address of the Kong Admin URL to connect to in the format of `protocol://address:port`.| -| --kong-workspace |`string` | `default` | Workspace in {{site.ee_product_name}} to be configured.| -| --kong-custom-entities-secret |`string` | none | Secret containing custom entities to be populated in DB-less mode, takes the form `namespace/name`.| -| --log-format |`string` | `text` | Format of logs of the controller. Allowed values are `text` and `json`. | -| --log-level |`string` | `info` | Level of logging for the controller. Allowed values are `trace`, `debug`, `info`, `warn`, `error`, `fatal` and `panic`. | -| --enable-reverse-sync |`bool` | `false` | Enable reverse checks from Kong to Kubernetes. Use this option only if a human has edit access to Kong's Admin API. | -| --kubeconfig |`string` | none | Path to kubeconfig file with authorization and master location information.| -| --profiling |`boolean` | `true` | Enable profiling via web interface `host:port/debug/pprof/`. | -| --publish-service |`string` | none | The namespaces and name of the Kubernetes Service fronting the {{site.kic_product_name}} in the form of namespace/name. The controller will set the status of the Ingress resources to match the endpoints of this service. In reference deployments, this is kong/kong-proxy.| -| --publish-status-address |`string` | none | User customized address to be set in the status of ingress resources. 
The controller will set the endpoint records on the ingress using this address.| -| --process-classless-ingress-v1beta1 |`boolean` | `false` | Toggles whether the controller processes `extensions/v1beta1` and `networking/v1beta1` Ingress resources that have no `kubernetes.io/ingress.class` annotation.| -| --process-classless-ingress-v1 |`boolean` | `false` | Toggles whether the controller processes `networking/v1` Ingress resources that have no `kubernetes.io/ingress.class` annotation or class field.| -| --process-classless-kong-consumer |`boolean` | `false` | Toggles whether the controller processes KongConsumer resources that have no `kubernetes.io/ingress.class` annotation.| -| --stderrthreshold |`string` | `2` | logs at or above this threshold go to stderr.| -| --sync-period |`duration` | `10m` | Relist and confirm cloud resources this often.| -| --sync-rate-limit |`float32` | `0.3` | Define the sync frequency upper limit. | -| --update-status |`boolean` | `true` | Indicates if the ingress controller should update the Ingress status IP/hostname.| -| --update-status-on-shutdown |`boolean` | `true` | Indicates if the ingress controller should update the Ingress status IP/hostname when the controller is being stopped.| -| --version |`boolean` | `false` | Shows release information about the {{site.kic_product_name}}.| -| --watch-namespace |`string` | none | Namespace to watch for Ingress and custom resources. The default value of an empty string results in the controller watching for resources in all namespaces and configuring Kong accordingly.| -| --help |`boolean` | `false` | Shows this documentation on the CLI and exit.| diff --git a/app/kubernetes-ingress-controller/1.2.x/references/custom-resources.md b/app/kubernetes-ingress-controller/1.2.x/references/custom-resources.md deleted file mode 100644 index e0f7205e4fea..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/references/custom-resources.md +++ /dev/null @@ -1,419 +0,0 @@ ---- -title: Custom Resource Definitions ---- - -The Ingress Controller can configure Kong specific features -using several [Custom Resource Definitions(CRDs)][k8s-crd]. - -Following CRDs enables users to declaratively configure all aspects of Kong: - -- [**KongPlugin**](#kongplugin): This resource corresponds to - the [Plugin][kong-plugin] entity in Kong. -- [**KongIngress**](#kongingress): This resource provides fine-grained control - over all aspects of proxy behaviour like routing, load-balancing, - and health checking. It serves as an "extension" to the Ingress resources - in Kubernetes. -- [**KongConsumer**](#kongconsumer): - This resource maps to the [Consumer][kong-consumer] entity in Kong. -- [**TCPIngress**](#tcpingress): - This resource can configure TCP-based routing in Kong for non-HTTP - services running inside Kubernetes. -- [**KongCredential (Deprecated)**](#kongcredential-deprecated): - This resource maps to - a credential (key-auth, basic-auth, jwt, hmac-auth) that is associated with - a specific KongConsumer. - -## KongPlugin - -This resource provides an API to configure plugins inside Kong using -Kubernetes-style resources. - -Please see the [concept](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/#KongPlugin) -document for how the resource should be used. 
- -The following snippet shows the properties available in KongPlugin resource: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: - namespace: -disabled: # optionally disable the plugin in Kong -config: # configuration for the plugin - key: value -configFrom: - secretKeyRef: - name: - key: -plugin: # like key-auth, rate-limiting etc -``` - -- `config` contains a list of `key` and `value` - required to configure the plugin. - All configuration values specific to the type of plugin go in here. - Please read the documentation of the plugin being configured to set values - in here. For any plugin in Kong, anything that goes in the `config` JSON - key in the Admin API request, goes into the `config` YAML key in this resource. - Please use a valid JSON to YAML convertor and place the content under the - `config` key in the YAML above. -- `configFrom` contains a reference to a Secret and key, where the key contains - a complete JSON or YAML configuration. This should be used when the plugin - configuration contains sensitive information, such as AWS credentials in the - Lambda plugin or the client secret in the OIDC plugin. Only one of `config` - or `configFrom` may be used in a KongPlugin, not both at once. -- `plugin` field determines the name of the plugin in Kong. - This field was introduced in {{site.kic_product_name}} 0.2.0. - -**Please note:** validation of the configuration fields is left to the user -by default. It is advised to setup and use the admission validating controller -to catch user errors. - -The plugins can be associated with Ingress -or Service object in Kubernetes using `konghq.com/plugins` annotation. - -### Examples - -#### Applying a plugin to a service - -Given the following plugin: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - header_name: my-request-id - echo_downstream: true -plugin: correlation-id -``` - -It can be applied to a service by annotating like: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: myapp-service - labels: - app: myapp-service - annotations: - konghq.com/plugins: request-id -spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: myapp-service - selector: - app: myapp-service -``` - -#### Applying a plugin to an ingress - -The KongPlugin above can be applied to a specific ingress (route or routes): - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - konghq.com/plugins: request-id - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -``` - -A plugin can also be applied to a specific KongConsumer by adding -`konghq.com/plugins` annotation to the KongConsumer resource. - -Please follow the -[Using the KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource) -guide for details on how to use this resource. 
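#### Applying a plugin to a consumer

As a minimal sketch (the consumer name and username here are illustrative), the `request-id` plugin defined above can be attached to a specific consumer by annotating the KongConsumer resource:

```yaml
apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: example-consumer
  annotations:
    kubernetes.io/ingress.class: kong
    konghq.com/plugins: request-id
username: example-user
```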
- -#### Applying a plugin with a secret configuration - -The plugin above can be modified to store its configuration in a secret: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -configFrom: - secretKeyRef: - name: plugin-conf-secret - key: request-id -plugin: correlation-id -``` - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: plugin-conf-secret -stringData: - request-id: | - header_name: my-request-id - echo_downstream: true -type: Opaque -``` - -## KongClusterPlugin - -A `KongClusterPlugin` is same as `KongPlugin` resource. The only differences -are that it is a Kubernetes cluster-level resource instead of a namespaced -resource, and can be applied as a global plugin using labels. - -Please consult the [KongPlugin](#kongplugin) section for details. - -*Example:* - -KongClusterPlugin example: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: request-id - annotations: - kubernetes.io/ingress.class: - labels: - global: "true" # optional, if set, then the plugin will be executed - # for every request that Kong proxies - # please note the quotes around true -config: - header_name: my-request-id -configFrom: - secretKeyRef: - name: - key: - namespace: -plugin: correlation-id -``` - -As with KongPlugin, only one of `config` or `configFrom` can be used. - -Setting the label `global` to `"true"` will apply the plugin globally in Kong, -meaning it will be executed for every request that is proxied via Kong. - -## KongIngress - -Ingress resource spec in Kubernetes can define routing policies -based on HTTP Host header and paths. -While this is sufficient in most cases, -sometimes, users may want more control over routing at the Ingress level. -`KongIngress` serves as an "extension" to Ingress resource. -It is not meant as a replacement to the -`Ingress` resource in Kubernetes. - -Please read the [concept](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/#kongingress) -document for why this resource exists and how it relates to the existing -Ingress resource. - -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and -[Route][kong-route] entities in Kong related to an Ingress resource -can be modified. - -Once a `KongIngress` resource is created, it needs to be associated with -an Ingress or Service resource using the following annotation: - -```yaml -konghq.com/override: kong-ingress-resource-name -``` - -Specifically, - -- To override any properties related to health-checking, load-balancing, - or details specific to a service, add the annotation to the Kubernetes - Service that is being exposed via the Ingress API. -- To override routing configuration (like protocol or method based routing), - add the annotation to the Ingress resource. - -Please follow the -[Using the KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource) -guide for details on how to use this resource. 
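Before the full reference spec below, here is a minimal sketch of the association (the names and values are illustrative): a KongIngress that overrides only a couple of proxy properties, attached to a Kubernetes Service with the `konghq.com/override` annotation.

```yaml
apiVersion: configuration.konghq.com/v1
kind: KongIngress
metadata:
  name: sample-customization
proxy:
  connect_timeout: 10000
  retries: 10
---
apiVersion: v1
kind: Service
metadata:
  name: echo
  annotations:
    konghq.com/override: sample-customization
spec:
  selector:
    app: echo
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
```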
- -For reference, the following is a complete spec for KongIngress: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: configuration-demo -upstream: - slots: 10 - hash_on: none - hash_fallback: none - healthchecks: - threshold: 25 - active: - concurrency: 10 - healthy: - http_statuses: - - 200 - - 302 - interval: 0 - successes: 0 - http_path: "/" - timeout: 1 - unhealthy: - http_failures: 0 - http_statuses: - - 429 - interval: 0 - tcp_failures: 0 - timeouts: 0 - passive: - healthy: - http_statuses: - - 200 - successes: 0 - unhealthy: - http_failures: 0 - http_statuses: - - 429 - - 503 - tcp_failures: 0 - timeouts: 0 -proxy: - protocol: http - path: / - connect_timeout: 10000 - retries: 10 - read_timeout: 10000 - write_timeout: 10000 -route: - methods: - - POST - - GET - regex_priority: 0 - strip_path: false - preserve_host: true - protocols: - - http - - https -``` - -## TCPIngress - -The Ingress resource in Kubernetes is HTTP-only. -This custom resource is modeled similar to the Ingress resource but for -TCP and TLS SNI based routing purposes: - -```yaml -apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: - namespace: - annotations: - kubernetes.io/ingress.class: -spec: - rules: - - host: - port: - backend: - serviceName: - servicePort: -``` - -If `host` is not specified, then port-based TCP routing is performed. Kong -doesn't care about the content of TCP stream in this case. - -If `host` is specified, then Kong expects the TCP stream to be TLS-encrypted -and Kong will terminate the TLS session based on the SNI. -Also note that, the port in this case should be configured with `ssl` parameter -in Kong. - -## KongConsumer - -This custom resource configures a consumer in Kong: - -The following snippet shows the field available in the resource: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: - namespace: - annotations: - kubernetes.io/ingress.class: -username: -custom_id: -``` - -An example: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: consumer-team-x - annotations: - kubernetes.io/ingress.class: kong -username: team-X -``` - -When this resource is created, a corresponding consumer entity will be -created in Kong. - -Consumers' `username` and `custom_id` values must be unique across the Kong -cluster. While KongConsumers exist in a specific Kubernetes namespace, -KongConsumers from all namespaces are combined into a single Kong -configuration, and no KongConsumers with the same `kubernetes.io/ingress.class` -may share the same `username` or `custom_id` value. - -## KongCredential (Deprecated) - -This custom resource can be used to configure a consumer specific -entities in Kong. -The resource reference the KongConsumer resource via the `consumerRef` key. - -The validation of the config object is left up to the user. 
- -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongCredential -metadata: - name: credential-team-x -consumerRef: consumer-team-x -type: key-auth -config: - key: 62eb165c070a41d5c1b58d9d3d725ca1 -``` - -The following credential types can be provisioned using the KongCredential -resource: - -- `key-auth` for [Key authentication](/hub/kong-inc/key-auth/) -- `basic-auth` for [Basic authentication](/hub/kong-inc/basic-auth/) -- `hmac-auth` for [HMAC authentication](/hub/kong-inc/hmac-auth/) -- `jwt` for [JWT based authentication](/hub/kong-inc/jwt/) -- `oauth2` for [Oauth2 Client credentials](/hub/kong-inc/oauth2/) -- `acl` for [ACL group associations](/hub/kong-inc/acl/) - -Please ensure that all fields related to the credential in Kong -are present in the definition of KongCredential's `config` section. - -Please refer to the -[using the Kong Consumer and Credential resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource) -guide for details on how to use this resource. - -[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[kong-consumer]: /gateway/api/admin-ee/latest/#/Consumers/list-consumer/ -[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin -[kong-upstream]: /gateway/api/admin-ee/latest/#/Upstreams/list-upstream/ -[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/ -[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/ diff --git a/app/kubernetes-ingress-controller/1.2.x/references/plugin-compatibility.md b/app/kubernetes-ingress-controller/1.2.x/references/plugin-compatibility.md deleted file mode 100644 index 66250f3b167f..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/references/plugin-compatibility.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Plugin Compatibility ---- - -DB-less mode is the preferred choice for controller-managed Kong and Kong -Enterprise clusters. However, not all plugins are available in DB-less mode. -Review the table below to check if a plugin you wish to use requires a -database. - -Note that some DB-less compatible plugins have some limitations or require -non-default configuration for -[compatibility](/gateway/latest/reference/db-less-and-declarative-config/#plugin-compatibility). 
- -## Kong - -| Plugin | Kong | Kong (DB-less) | -|-------------------------|-------------------------------|-------------------------------| -| acl | | | -| aws-lambda | | | -| azure-functions | | | -| basic-auth | | | -| bot-detection | | | -| correlation-id | | | -| cors | | | -| datadog | | | -| file-log | | | -| hmac-auth | | | -| http-log | | | -| ip-restriction | | | -| jwt | | | -| key-auth | | | -| oauth2 | | | -| post-function | | | -| pre-function | | | -| prometheus | | | -| proxy-cache | | | -| rate-limiting | | | -| request-termination | | | -| request-transformer | | | -| response-ratelimiting | | | -| response-transformer | | | -| syslog | | | -| tcp-log | | | -| udp-log | | | -| zipkin | | | - -## {{site.ee_product_name}} - -{% include md/enterprise/k8s-image-note.md %} - -| Plugin | Kong for Kubernetes with {{site.ee_product_name}} | Kong for Kubernetes Enterprise | -|----------------------------------|--------------------------------------------|-------------------------------------------| -| acl | | | -| aws-lambda | | | -| azure-functions | | | -| basic-auth | | | -| bot-detection | | | -| correlation-id | | | -| cors | | | -| datadog | | | -| file-log | | | -| hmac-auth | | | -| http-log | | | -| ip-restriction | | | -| jwt | | | -| key-auth | | | -| oauth2 | | | -| post-function | | | -| pre-function | | | -| prometheus | | | -| proxy-cache | | | -| rate-limiting | | | -| request-termination | | | -| request-transformer | | | -| response-ratelimiting | | | -| response-transformer | | | -| syslog | | | -| tcp-log | | | -| udp-log | | | -| zipkin | | | -| application-registration | | 1 | -| canary release | | | -| collector | | | -| degraphql | | | -| exit-transformer | | | -| forward-proxy | | | -| graphql-proxy-cache-advanced | | | -| graphql-rate-limiting-advanced | | | -| jwt-signer | | | -| kafka-log | | | -| kafka-upstream | | | -| key-auth-enc | | | -| ldap-auth-advanced | | | -| mtls-auth | | | -| oauth2-introspection | | | -| openid-connect | | | -| proxy-cache-advanced | | | -| rate-limiting-advanced | | | -| request-transformer-advanced | | 2 | -| request-validator | | | -| response-transformer-advanced | | | -| route-transformer-advanced | | | -| statsd-advanced | | 3 | -| vault-auth | | | - -1 Only used with Dev Portal - -2 request-transformer now has feature parity with - request-transformer-advanced. request-transformer-advanced remains only for - compatibility with existing configurations. - -3 Only used with Vitals diff --git a/app/kubernetes-ingress-controller/1.2.x/references/version-compatibility.md b/app/kubernetes-ingress-controller/1.2.x/references/version-compatibility.md deleted file mode 100644 index e030800132ba..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/references/version-compatibility.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Version Compatibility ---- - -Kong's Kubernetes ingress controller is compatible with different flavors of Kong. -The following sections detail on compatibility between versions. - -## Kong - -By Kong, we are here referring to the official distribution of the Open-Source -{{site.base_gateway}}. 
- -| {{site.kic_product_name}} | <= 0.0.4 | 0.0.5 | 0.1.x | 0.2.x | 0.3.x | 0.4.x | 0.5.x | 0.6.x | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | 1.1.x | 1.2.x | -|:------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kong 0.13.x | | | | | | | | | | | | | | | | -| Kong 0.14.x | | | | | | | | | | | | | | | | -| Kong 1.0.x | | | | | | | | | | | | | | | | -| Kong 1.1.x | | | | | | | | | | | | | | | | -| Kong 1.2.x | | | | | | | | | | | | | | | | -| Kong 1.3.x | | | | | | | | | | | | | | | | -| Kong 1.4.x | | | | | | | | | | | | | | | | -| Kong 1.5.x | | | | | | | | | | | | | | | | -| Kong 2.0.x | | | | | | | | | | | | | | | | -| Kong 2.1.x | | | | | | | | | | | | | | | | -| Kong 2.2.x | | | | | | | | | | | | | | | | -| Kong 2.3.x | | | | | | | | | | | | | | | | - -## Kong-enterprise-k8s - -Kong-enterprise-k8s is an official distribution by Kong, Inc. which bundles -all enterprise plugins into Open-Source {{site.base_gateway}}. - -The compatibility for this distribution will largely follow that of the -Open-Source {{site.base_gateway}} compatibility (the previous section). - -{% include md/enterprise/k8s-image-note.md %} - -| {{site.kic_product_name}} | 0.6.2+ | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | 1.1.x | 1.2.x | -|:----------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kong-enterprise-k8s 1.3.x.y | | | | | | | | | -| Kong-enterprise-k8s 1.4.x.y | | | | | | | | | -| Kong-enterprise-k8s 2.0.x.y | | | | | | | | | - -## {{site.ee_product_name}} - -{{site.ee_product_name}} is the official enterprise distribution, which includes all -other enterprise functionality, built on top of the Open-Source {{site.base_gateway}}. 
- -| {{site.kic_product_name}} | 0.0.5 | 0.1.x | 0.2.x | 0.3.x | 0.4.x | 0.5.x | 0.6.x | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | 1.1.x | 1.2.x | -|:------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| {{site.ee_product_name}} 0.32-x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.33-x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.34-x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.35-x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.36-x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 1.3.x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 1.5.x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.1.x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.2.x | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.3.x | | | | | | | | | | | | | | | - -## Kubernetes - -| {{site.kic_product_name}} | 0.9.x | 0.10.x | 1.0.x | 1.1.x | 1.2.x | -|:--------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kubernetes 1.13 | | | | | | -| Kubernetes 1.14 | | | | | | -| Kubernetes 1.15 | | | | | | -| Kubernetes 1.16 | | | | | | -| Kubernetes 1.17 | | | | | | -| Kubernetes 1.18 | | | | | | -| Kubernetes 1.19 | | | | | | -| Kubernetes 1.20 | | | | | | -| Kubernetes 1.21 | | | | | | diff --git a/app/kubernetes-ingress-controller/1.2.x/troubleshooting.md b/app/kubernetes-ingress-controller/1.2.x/troubleshooting.md deleted file mode 100644 index 94c5ab27f882..000000000000 --- a/app/kubernetes-ingress-controller/1.2.x/troubleshooting.md +++ /dev/null @@ -1,221 +0,0 @@ ---- -title: Debug & Troubleshooting ---- - -## Debug - -Using the flag `--v=XX` it is possible to increase the level of logging. -In particular: - -- `--v=3` shows details about the service, Ingress rule, and endpoint changes - -## Authentication to the Kubernetes API Server - -A number of components are involved in the authentication process and the first step is to narrow -down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file. -Both authentications must work: - -```text -+-------------+ service +------------+ -| | authentication | | -+ apiserver +<-------------------+ ingress | -| | | controller | -+-------------+ +------------+ - -``` - -## Service authentication - -The Ingress controller needs information from API server to configure Kong. -Therefore, authentication is required, which can be achieved in two different ways: - -1. **Service Account**: This is recommended - because nothing has to be configured. - The Ingress controller will use information provided by the system - to communicate with the API server. - See 'Service Account' section for details. -1. **Kubeconfig file**: In some Kubernetes environments - service accounts are not available. - In this case, a manual configuration is required. - The Ingress controller binary can be started with the `--kubeconfig` flag. - The value of the flag is a path to a file specifying how - to connect to the API server. 
Using the `--kubeconfig` - does not require the flag `--apiserver-host`. - The format of the file is identical to `~/.kube/config` - which is used by `kubectl` to connect to the API server. - See 'kubeconfig' section for details. - -## Discovering API-server - -Using this flag `--apiserver-host=http://localhost:8080`, -it is possible to specify an unsecured API server or -reach a remote Kubernetes cluster using -[kubectl proxy](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/#using-kubectl-proxy). -Please do not use this approach in production. - -In the diagram below you can see the full authentication flow with all options, starting with the browser -on the lower left hand side. - -```text - -Kubernetes Workstation -+---------------------------------------------------+ +------------------+ -| | | | -| +-----------+ apiserver +------------+ | | +------------+ | -| | | proxy | | | | | | | -| | apiserver | | ingress | | | | ingress | | -| | | | controller | | | | controller | | -| | | | | | | | | | -| | | | | | | | | | -| | | service account/ | | | | | | | -| | | kubeconfig | | | | | | | -| | +<-------------------+ | | | | | | -| | | | | | | | | | -| +------+----+ kubeconfig +------+-----+ | | +------+-----+ | -| |<--------------------------------------------------------| | -| | | | -+---------------------------------------------------+ +------------------+ -``` - -## Service Account - -If using a service account to connect to the API server, Dashboard expects the file -`/var/run/secrets/kubernetes.io/serviceaccount/token` to be present. It provides a secret -token that is required to authenticate with the API server. - -Verify with the following commands: - -```shell -# start a container that contains curl -$ kubectl run test --image=tutum/curl -- sleep 10000 - -# check that container is running -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -test-701078429-s5kca 1/1 Running 0 16s - -# check if secret exists -$ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ -ca.crt -namespace -token - -# get service IP of master -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kubernetes 10.0.0.1 443/TCP 1d - -# check base connectivity from cluster inside -$ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 -Unauthorized - -# connect using tokens -$ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token) -$ echo $TOKEN_VALUE -eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A -$ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization: Bearer $TOKEN_VALUE" https://10.0.0.1 -{ - "paths": [ - "/api", - "/api/v1", - "/apis", - "/apis/apps", - "/apis/apps/v1alpha1", - "/apis/authentication.k8s.io", - "/apis/authentication.k8s.io/v1beta1", - "/apis/authorization.k8s.io", - "/apis/authorization.k8s.io/v1beta1", - "/apis/autoscaling", - "/apis/autoscaling/v1", - "/apis/batch", - "/apis/batch/v1", - "/apis/batch/v2alpha1", - "/apis/certificates.k8s.io", - "/apis/certificates.k8s.io/v1alpha1", - "/apis/extensions", - "/apis/extensions/v1beta1", - "/apis/policy", - "/apis/policy/v1alpha1", - "/apis/rbac.authorization.k8s.io", - "/apis/rbac.authorization.k8s.io/v1alpha1", - "/apis/storage.k8s.io", - "/apis/storage.k8s.io/v1beta1", - "/healthz", - "/healthz/ping", - "/logs", - "/metrics", - "/swaggerapi/", - "/ui/", - "/version" - ] -} -``` - -If it is not working, there are two possible reasons: - -1. 
The contents of the tokens are invalid. - Find the secret name with `kubectl get secrets --field-selector=type=kubernetes.io/service-account-token` and - delete it with `kubectl delete secret `. - It will automatically be recreated. -1. You have a non-standard Kubernetes installation - and the file containing the token may not be present. - -The API server will mount a volume containing this file, -but only if the API server is configured to use -the ServiceAccount admission controller. -If you experience this error, -verify that your API server is using the ServiceAccount admission controller. -If you are configuring the API server by hand, -you can set this with the `--admission-control` parameter. -Please note that you should use other admission controllers as well. -Before configuring this option, please read about admission controllers. - -More information: - -- [User Guide: Service Accounts](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) -- [Cluster Administrator Guide: Managing Service Accounts](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) - -## Kubeconfig - -If you want to use a kubeconfig file for authentication, -follow the deploy procedure and -add the flag `--kubeconfig=/etc/kubernetes/kubeconfig.yaml` to the deployment - -## Dumping generated Kong configuration - -If the controller generates configuration that it cannot apply to Kong -successfully, reviewing the generated configuration manually and/or applying it -in a test environment can help locate potential causes. - -Under normal operation, the controller does not store generated configuration; -it is only sent to Kong's Admin API. The `--dump-config` flag enables a -diagnostic mode where the controller also saves generated configuration to a -temporary file. To use the diagnostic mode: - -1. Set the `--dump-config` flag (or `CONTROLLER_DUMP_CONFIG` environment - variable) to either `enabled` or `sensitive`. `enabled` produces a redacted - configuration that omits certificate configuration and credentials, suitable - for sharing with Kong support. `sensitive` dumps the complete configuration - exactly as it is sent to the Admin API. -1. Check controller logs for the dump location with `kubectl logs PODNAME -c - ingress-controller | grep "config dumps"`. -1. (Optional) Make a change to a Kubernetes resource that you know will - reproduce the issue. If you are unsure what change caused the issue - originally, you can omit this step. -1. Copy dumped configuration out of the controller for local review with - `kubectl cp PODNAME:/path/to/dump/last_bad.json /tmp/last_bad.json -c - ingress-controller`. If the controller successfully applied configuration - before the failure, you can also look at `last_good.json`. - -Once you have dumped configuration, take one of the following -approaches to isolate issues: - -- If you know of a specific Kubernetes resource change that reproduces the - issue, diffing `last_good.json` and `last_bad.json` will show the change - the controller is trying to apply unsuccessfully. -- You can apply dumped configuration via the `/config` Admin API endpoint - (DB-less mode) or using decK (DB-backed mode) to a test instance not managed - by the ingress controller. This approach lets you review requests - and responses (passing `--verbose 2` to decK will show all requests) and - add debug Kong Lua code when controller requests result in an - unhandled error (500 response). 
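As a sketch of step 1 above, the environment variable can be set on the controller container in the controller Deployment. The container name below (`ingress-controller`) matches the one used in the `kubectl logs` and `kubectl cp` commands above and may differ in your cluster:

```yaml
spec:
  template:
    spec:
      containers:
      - name: ingress-controller
        env:
        # Set to "sensitive" instead to include certificates and credentials in the dump.
        - name: CONTROLLER_DUMP_CONFIG
          value: "enabled"
```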
diff --git a/app/kubernetes-ingress-controller/1.3.x/concepts/custom-resources.md b/app/kubernetes-ingress-controller/1.3.x/concepts/custom-resources.md deleted file mode 100644 index 03860d41569c..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/concepts/custom-resources.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Custom Resources ---- - -[Custom Resources][k8s-crd] in Kubernetes allow controllers -to extend Kubernetes-style -declarative APIs that are specific to certain applications. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -The {{site.kic_product_name}} uses the `configuration.konghq.com` API group -for storing configuration specific to Kong. - -The following CRDs allow users to declaratively configure all aspects of Kong: - -- [**KongIngress**](#kongingress) -- [**KongPlugin**](#kongplugin) -- [**KongClusterPlugin**](#kongclusterplugin) -- [**KongConsumer**](#kongconsumer) -- [**TCPIngress**](#tcpingress) -- [**KongCredential (Deprecated)**](#kongcredential-deprecated) - -## KongIngress - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, `KongIngress` Custom Resource is used as an -"extension" to the existing Ingress API to provide fine-grained control -over proxy behavior. -In other words, `KongIngress` works in conjunction with -the existing Ingress resource and extends it. -It is not meant as a replacement for the `Ingress` resource in Kubernetes. -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and [Route][kong-route] -entities in Kong related to an Ingress resource can be modified. - -Once a `KongIngress` resource is created, you can use the `configuration.konghq.com` -annotation to associate the `KongIngress` resource with an `Ingress` or a `Service` -resource: - -- When the annotation is added to the `Ingress` resource, the routing - configurations are updated, meaning all routes associated with the annotated - `Ingress` are updated to use the values defined in the `KongIngress`'s route - section. -- When the annotation is added to a `Service` resource in Kubernetes, - the corresponding `Service` and `Upstream` in Kong are updated to use the - `proxy` and `upstream` blocks as defined in the associated - `KongIngress` resource. - -The below diagram shows how the resources are linked -with one another: - -![Associating Kong Ingress](/assets/images/products/kubernetes-ingress-controller/kong-ingress-association.png "Associating Kong Ingress") - -## KongPlugin - -Kong is designed around an extensible [plugin][kong-plugin] -architecture and comes with a -wide variety of plugins already bundled inside it. -These plugins can be used to modify the request/response or impose restrictions -on the traffic. - -Once this resource is created, the resource needs to be associated with an -`Ingress`, `Service`, or `KongConsumer` resource in Kubernetes. -For more details, please read the reference documentation on `KongPlugin`. 
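-
-As an illustration only (the plugin, its configuration, and the `echo` Service
-below are placeholders, not part of this reference), a minimal `KongPlugin`
-and its association via the `konghq.com/plugins` annotation could look like
-this:
-
-```bash
-$ echo "
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: rl-by-minute
-config:
-  minute: 5
-  policy: local
-plugin: rate-limiting
-" | kubectl apply -f -
-
-# attach the plugin to an existing Service named 'echo'
-$ kubectl annotate service echo konghq.com/plugins=rl-by-minute
-```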
-
-The below diagram shows how you can link a `KongPlugin` resource to an
-`Ingress`, `Service`, or `KongConsumer`:
-
-| | |
-:-:|:-:
-![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association1.png)|![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association2.png)
-
-## KongClusterPlugin
-
-_This resource requires the [`kubernetes.io/ingress.class` annotation](/kubernetes-ingress-controller/{{page.release}}/references/annotations/)._
-
-The KongClusterPlugin resource is exactly the same as KongPlugin, except that
-it is a Kubernetes cluster-level resource instead of a namespaced resource.
-This can help when the configuration of the plugin needs to be centralized
-and the permissions to add/update plugin configuration rest with a different
-persona than the application owners.
-
-This resource can be associated with an `Ingress`, `Service`, or `KongConsumer`
-and can be used in the exact same way as KongPlugin.
-
-A namespaced KongPlugin resource takes priority over a
-KongClusterPlugin with the same name.
-
-## KongConsumer
-
-_This resource requires the `kubernetes.io/ingress.class` annotation. Its value
-must match the value of the controller's `--ingress-class` argument, which is
-"kong" by default._
-
-This custom resource configures `Consumers` in Kong.
-Every `KongConsumer` resource in Kubernetes directly translates to a
-[Consumer][kong-consumer] object in Kong.
-
-## TCPIngress
-
-_This resource requires the `kubernetes.io/ingress.class` annotation. Its value
-must match the value of the controller's `--ingress-class` argument, which is
-"kong" by default._
-
-This Custom Resource is used for exposing non-HTTP
-and non-gRPC services running inside Kubernetes to
-the outside world via Kong. This is useful when
-you want to use a single cloud LoadBalancer for all kinds
-of traffic into your Kubernetes cluster.
-
-It is very similar to the Ingress resource that ships with Kubernetes.
-
-## KongCredential (Deprecated)
-
-Once a `KongConsumer` resource is created,
-credentials associated with the `Consumer` can be provisioned inside Kong
-using the KongCredential custom resource.
-
-This Custom Resource has been deprecated and will be removed in a future
-release.
-Instead, please use secret-based credentials.
-
-[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/
-[kong-consumer]:/gateway/api/admin-ee/latest/#/Consumers/list-consumer/
-[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin
-[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/
-[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/
-[kong-upstream]: /gateway/api/admin-ee/latest/#/Upstreams/list-upstream/
diff --git a/app/kubernetes-ingress-controller/1.3.x/concepts/deployment.md b/app/kubernetes-ingress-controller/1.3.x/concepts/deployment.md
deleted file mode 100644
index 28a4ea353f36..000000000000
--- a/app/kubernetes-ingress-controller/1.3.x/concepts/deployment.md
+++ /dev/null
@@ -1,302 +0,0 @@
----
-title: Kong Ingress Controller Deployment
----
-
-The {{site.kic_product_name}} is designed to be deployed in a variety of ways
-based on use-cases. This document explains the various components involved
-and the choices one can make for a specific use-case.
-
-- [**Kubernetes Resources**](#kubernetes-resources):
-  Various Kubernetes resources required to run the {{site.kic_product_name}}.
-- [**Deployment options**](#deployment-options):
-  A high-level explanation of the choices to consider when customizing
-  the deployment to best serve a specific use case.
-
-## Kubernetes Resources
-
-The following resources are used to run the {{site.kic_product_name}}:
-
-- [Namespace](#namespace)
-- [Custom resources](#custom-resources)
-- [RBAC permissions](#rbac-permissions)
-- [Ingress Controller Deployment](#ingress-controller-deployment)
-- [Kong Proxy service](#kong-proxy-service)
-- [Database deployment and migrations](#database-deployment-and-migration)
-
-These resources are created if the reference deployment manifests are used
-to deploy the {{site.kic_product_name}}.
-The resources are explained below for users to gain an understanding of how
-they are used, so that they can be tweaked as necessary for a specific use-case.
-
-### Namespace
-
-> optional
-
-The {{site.kic_product_name}} can be deployed in any [namespace][k8s-namespace].
-If the {{site.kic_product_name}} is being used to proxy traffic for all namespaces
-in a Kubernetes cluster, which is generally the case,
-it is recommended that it is installed in a dedicated
-`kong` namespace, but it is not required to do so.
-
-The example deployments present in this repository automatically create a `kong`
-namespace and deploy resources into that namespace.
-
-### Custom Resources
-
-> required
-
-The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and
-doesn't offer resources to describe the specifics of proxying.
-To overcome this limitation, custom resources are used as an
-"extension" to the existing Ingress API.
-
-A few custom resources are bundled with the {{site.kic_product_name}} to
-configure settings that are specific to Kong and provide fine-grained control
-over the proxying behavior.
-
-Please refer to the [custom resources](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/)
-concept document for details.
-
-### RBAC permissions
-
-> required
-
-The {{site.kic_product_name}} communicates with the Kubernetes API-server and
-dynamically configures Kong to automatically load balance across the pods
-of a service as that service is scaled in or out.
-
-For this reason, it requires RBAC permissions to access resources stored
-in the Kubernetes object store.
-
-It needs read permissions (get, list, watch)
-on the following Kubernetes resources:
-
-- Endpoints
-- Nodes
-- Pods
-- Secrets
-- Ingress
-- KongPlugins
-- KongConsumers
-- KongCredentials
-- KongIngress
-
-By default, the controller listens for events on the above resources across
-all namespaces and will need access to these resources at the cluster level
-(using `ClusterRole` and `ClusterRoleBinding`).
-
-In addition to these, it needs:
-
-- Permission to create a ConfigMap and to read and update it, to facilitate
-  leader-election. Please read this [document](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/)
-  for more details.
-- Update permission on the Ingress resource to update the status of
-  the Ingress resource.
-
-If the Ingress Controller is listening for events on a single namespace,
-these permissions can be restricted to that specific
-namespace using `Role` and `RoleBinding` resources.
-
-In addition to these, it is necessary to create a `ServiceAccount` which
-has the above permissions. The Ingress Controller Pod then has this
-`ServiceAccount` association.
-This gives the Ingress Controller process the
-necessary authentication and authorization tokens to communicate with the
-Kubernetes API-server.
-
-### Ingress Controller deployment
-
-> required
-
-A Kong Ingress deployment consists of the Ingress Controller deployed alongside
-Kong. The deployment will be different depending on whether a database is being
-used or not.
-
-The deployment(s) is the core that actually runs the {{site.kic_product_name}}.
-
-See the [database](#database) section below for details.
-
-### Kong Proxy service
-
-> required
-
-Once the {{site.kic_product_name}} is deployed, one service is needed to
-expose Kong outside the Kubernetes cluster so that it can receive all traffic
-that is destined for the cluster and route it appropriately.
-`kong-proxy` is a Kubernetes service which points to the Kong pods that are
-capable of proxying request traffic. This service will usually be of type
-`LoadBalancer`, however it is not required to be such.
-The IP address of this service should be used to configure DNS records
-for all the domains that Kong should be proxying, to route the traffic to Kong.
-
-### Database deployment and migration
-
-> optional
-
-The {{site.kic_product_name}} can run with or without a database.
-If a database is being deployed, then the following resources are required:
-
-- A `StatefulSet` which runs a PostgreSQL pod backed with a `PersistentVolume`
-  to store Kong's configuration.
-- An internal `Service` which resolves to the PostgreSQL pod. This ensures
-  that Kong can find the PostgreSQL instance using DNS inside
-  the Kubernetes cluster.
-- A batch `Job` to run schema migrations. This needs to be executed once
-  to bootstrap Kong's database schema.
-  Please note that on any Kong version upgrade, another `Job` will
-  need to be created if the newer version contains any migrations.
-
-To figure out if you should be using a database or not, please refer to the
-[database](#database) section below.
-
-## Deployment options
-
-The following are the different options to consider while deploying the
-{{site.kic_product_name}} for your specific use case:
-
-- [**Kubernetes Service Type**](#kubernetes-service-types):
-  Choose between a LoadBalancer and a NodePort service
-- [**Database**](#database):
-  Backing Kong with a database or running without a database
-- [**Multiple Ingress Controllers**](#multiple-ingress-controllers):
-  Running multiple {{site.kic_product_name}}s inside the same Kubernetes cluster
-- [**Runtime**](#runtime):
-  Using Kong or {{site.ee_product_name}} (for {{site.ee_product_name}} customers)
-
-### Kubernetes Service Types
-
-Once deployed, any Ingress Controller needs to be exposed outside the
-Kubernetes cluster to start accepting external traffic.
-In Kubernetes, the `Service` abstraction is used to expose any application
-to the rest of the cluster or outside the cluster.
-
-If your Kubernetes cluster is running in a cloud environment, where
-Load Balancers can be provisioned with relative ease, it is recommended
-that you use a Service of type `LoadBalancer` to expose Kong to the outside
-world. For the Ingress Controller to function correctly, it is also required
-that an L4 (or TCP) Load Balancer is used and not an L7 (HTTP(S)) one.
-
-If your Kubernetes cluster doesn't support a service of type `LoadBalancer`,
-then it is possible to use a service of type `NodePort`.
-
-### Database
-
-Until Kong 1.0, a database was required to run Kong.
-Kong 1.1 introduced a new mode, DB-less, in which Kong can be configured
-using a config file, removing the need for a database.
-
-It is possible to deploy and run the {{site.kic_product_name}} with or without a
-database. The choice depends on the specific use-case and results in no
-loss of functionality.
-
-#### Without a database
-
-In DB-less deployments, Kong's Kubernetes ingress controller runs
-alongside and dynamically configures
-Kong as per the changes it receives from the Kubernetes API server.
-
-The following figure shows what this deployment looks like:
-
-![Kong DB-less](/assets/images/products/kubernetes-ingress-controller/dbless-deployment.png "Kong DB-less architecture")
-
-In this deployment, only one Deployment is required, which consists of
-a Pod with two containers: a Kong container which proxies the requests
-and a controller container which configures Kong.
-
-The `kong-proxy` service points to the ports of the Kong container in the
-above deployment.
-
-Since each pod contains a controller and a Kong container, scaling out
-simply requires horizontally scaling this deployment to handle more traffic
-or to add redundancy in the infrastructure.
-
-#### With a Database
-
-In a deployment where Kong is backed by a DB, the deployment architecture
-is a little different.
-
-Please refer to the below figure:
-
-![Kong with a database](/assets/images/products/kubernetes-ingress-controller/db-deployment.png "Kong with database")
-
-In this type of deployment, there are two types of deployments created,
-separating the control and data flow:
-
-- **Control-plane**: This deployment consists of a pod(s) running
-  the controller alongside
-  a Kong container, which can only configure the database. This deployment
-  does not proxy any traffic but only configures Kong. If multiple
-  replicas of this pod are running, a leader election process will ensure
-  that only one of the pods is configuring Kong's database at a time.
-- **Data-plane**: This deployment consists of pods running a
-  single Kong container which can proxy traffic based on the configuration
-  it loads from the database. This deployment should be scaled to respond
-  to changes in traffic profiles and add redundancy to safeguard from node
-  failures.
-- **Database**: The database is used to store Kong's configuration and propagate
-  changes to all the Kong pods in the cluster. All Kong containers in the
-  cluster should be able to connect to this database.
-
-A database-driven deployment should be used if your use-case requires
-dynamic creation of Consumers and/or credentials in Kong at a scale large
-enough that the consumers will not fit entirely in memory.
-
-## Multiple Ingress Controllers
-
-It is possible to run multiple instances of the {{site.kic_product_name}} or
-run a Kong {{site.kic_product_name}} alongside other Ingress Controllers inside
-the same Kubernetes cluster.
-
-There are a few different ways of accomplishing this:
-
-- Using the `kubernetes.io/ingress.class` annotation:
-  It is common to deploy Ingress Controllers at the cluster level, meaning
-  an Ingress Controller will satisfy Ingress rules created in all the namespaces
-  inside a Kubernetes cluster.
-  Use the annotation on Ingress and Custom resources to segment
-  the Ingress resources between multiple Ingress Controllers.
-  **Warning!**
-  If another Ingress Controller is the default for the cluster (that is, it
-  picks up resources without any `kubernetes.io/ingress.class` annotation),
-  be careful about using the default `kong` ingress class.
-  The default `kong` ingress class has a special behavior:
-  any Ingress resource that is not annotated is picked up.
-  Therefore, when using an ingress class other than `kong`, you have to set that
-  ingress class on every Kong CRD object (plugin, consumer) that you use.
-- Namespace-based isolation:
-  The {{site.kic_product_name}} supports a deployment option where it will satisfy
-  Ingress resources in a specific namespace. With this model, one can deploy
-  a controller in multiple namespaces and they will run in an isolated manner.
-- If you are using {{site.ee_product_name}}, you can run multiple Ingress Controllers
-  pointing to the same database and configuring different Workspaces inside
-  {{site.ee_product_name}}. With such a deployment, one can use either of the above
-  two approaches to segment Ingress resources into different Workspaces in
-  {{site.ee_product_name}}.
-
-## Runtime
-
-The {{site.kic_product_name}} is compatible with a variety of runtimes:
-
-### {{site.base_gateway}} (OSS)
-
-This is the [Open-Source Gateway](https://github.com/kong/kong) runtime.
-The Ingress Controller is primarily developed against releases of the
-open-source gateway.
-
-### {{site.ee_product_name}} K8S
-
-If you are a {{site.ee_product_name}} customer, you have access to two more runtimes.
-
-The first one, {{site.ee_product_name}} K8S, is a package that takes the Open-Source
-{{site.base_gateway}} and adds enterprise-only plugins to it.
-
-You simply need to deploy {{site.ee_product_name}} K8S instead of the Open-Source
-Gateway in order to take full advantage of enterprise plugins.
-
-### {{site.ee_product_name}}
-
-The {{site.kic_product_name}} is also compatible with the full-blown version of
-{{site.ee_product_name}}. This runtime ships with Kong Manager, Kong Portal, and a
-number of other enterprise-only features.
-[This doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise) provides a high-level
-overview of the architecture.
-
-[k8s-namespace]: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
diff --git a/app/kubernetes-ingress-controller/1.3.x/concepts/design.md b/app/kubernetes-ingress-controller/1.3.x/concepts/design.md
deleted file mode 100644
index 62402fec8f0b..000000000000
--- a/app/kubernetes-ingress-controller/1.3.x/concepts/design.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: Kong Ingress Controller Design
---
-
-## Overview
-
-The {{site.kic_product_name}} configures Kong
-using Ingress resources created inside a Kubernetes cluster.
-
-The {{site.kic_product_name}} is made up of two components:
-
-- Kong, the core proxy that handles all the traffic
-- Controller, a process that syncs the configuration from Kubernetes to Kong
-
-The {{site.kic_product_name}} performs more than just proxying the traffic coming
-into a Kubernetes cluster. It is possible to configure plugins,
-load balancing, and health checking, and leverage all that Kong offers in a
-standalone installation.
-
-The following figure shows how it works:
-
-![high-level-design](/assets/images/products/kubernetes-ingress-controller/high-level-design.png "High Level Design")
-
-The Controller listens for changes happening inside the Kubernetes
-cluster and updates Kong in response to those changes to correctly
-proxy all the traffic.
-
-Kong is updated dynamically to respond to changes around scaling,
-configuration changes, and failures that happen inside a Kubernetes
-cluster.
-
-## Translation
-
-Kubernetes resources are mapped to Kong resources to correctly
-proxy all the traffic.
-
-The following figure describes the mapping from Kubernetes concepts
-to Kong's configuration:
-
-![translating k8s to kong](/assets/images/products/kubernetes-ingress-controller/k8s-to-kong.png "Translating k8s resources to Kong")
-
-Let's go through how Kubernetes resources are mapped to Kong's
-configuration:
-
-- An [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/)
-  resource in Kubernetes defines a set of rules for proxying
-  traffic. These rules correspond to the concept of a Route in Kong.
-- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/)
-  inside Kubernetes is a way to abstract an application that is
-  running on a set of pods.
-  This maps to two objects in Kong: Service and Upstream.
-  The Service object in Kong holds the information on the protocol
-  to use to talk to the upstream service and various other protocol-specific
-  settings. The Upstream object defines load balancing
-  and healthchecking behavior.
-- Pods associated with a Service in Kubernetes map to Targets belonging
-  to the Upstream (the upstream corresponding to the Kubernetes
-  Service) in Kong. Kong load balances across the Pods of your service.
-  This means that requests flowing through Kong are not directed via
-  kube-proxy but go directly to the pod.
-
-For more information on how Kong works with Routes, Services, and Upstreams,
-please see the [Proxy](/gateway/latest/reference/proxy/)
-and [Load balancing](/gateway/latest/reference/loadbalancing/) references.
diff --git a/app/kubernetes-ingress-controller/1.3.x/concepts/ha-and-scaling.md b/app/kubernetes-ingress-controller/1.3.x/concepts/ha-and-scaling.md
deleted file mode 100644
index 86b6e710c9f4..000000000000
--- a/app/kubernetes-ingress-controller/1.3.x/concepts/ha-and-scaling.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-title: High-availability and Scaling
---
-
-## High availability
-
-The {{site.kic_product_name}} is designed to be reasonably easy to operate and
-to be highly available, meaning that when some expected failures do occur, the
-Controller should be able to continue to function with the minimum possible
-service disruption.
-
-The {{site.kic_product_name}} is composed of two parts: Kong, which handles
-the requests, and the Controller, which configures Kong dynamically.
-
-Kong itself can be deployed in a highly available manner by deploying
-multiple instances (or pods). Kong nodes are stateless, meaning a Kong pod
-can be terminated and restarted at any point in time.
-
-The controller itself can be stateful or stateless, depending on whether a database
-is being used or not.
-
-If a database is not used, then the Controller and Kong are deployed as
-colocated containers in the same pod and each controller configures the Kong
-container that it is running with.
-
-For cases when a database is necessary, the Controllers can be deployed
-across multiple zones to provide redundancy. In such a case, a leader election
-process will elect one instance as a leader, which will manipulate Kong's
-configuration.
-
-### Leader election
-
-The {{site.kic_product_name}} performs a leader election when multiple
-instances of the controller are running to ensure that only a single Controller
-is actively pushing changes to Kong's database (when running in DB mode).
-If multiple controllers are making changes to the database, it is possible that
-the controllers step over each other.
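-
-As a quick sanity check (assuming the default election ConfigMap name and a
-`kong` namespace; the exact name is derived as described below), you can
-inspect the election ConfigMap to see which replica currently holds the lock:
-
-```bash
-# the leader's identity is typically recorded in the ConfigMap's leader-election annotation
-$ kubectl get configmap ingress-controller-leader-kong -n kong \
-    -o jsonpath='{.metadata.annotations}'
-```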
-If an instance of the controller fails, any other container which is a follower, -takes up the leadership and then continues syncing Kong's configuration from -Kubernetes. - -For this reason, the Controller needs permission to create a ConfigMap. -By default, the permission is given at Cluster level but it can be narrowed -down to a single namespace (using Role and RoleBinding) for a stricter RBAC -policy. - -It also needs permission to read and update this ConfigMap. -This permission can be specific to the ConfigMap that is being used -for leader-election purposes. -The name of the ConfigMap is derived from the value of election-id CLI flag -(default: `ingress-controller-leader`) and -ingress-class (default: `kong`) as: "-". -For example, the default ConfigMap that is used for leader election will -be "ingress-controller-leader-kong", and it will be present in the same -namespace that the controller is deployed in. - -## Scaling - -Kong is designed to be horizontally scalable, meaning as traffic increases, -multiple instances of Kong can be deployed to handle the increase in load. - -The configuration is either pumped into Kong directly via the Ingress -Controller or loaded via the database. Kong containers can be considered -stateless as the configuration is either loaded from the database (and -cached heavily in-memory) or loaded in-memory directly via a config file. - -One can use a `HorizontalPodAutoscaler` (HPA) based on metrics -like CPU utilization, bandwidth being used, total request count per second -to dynamically scale {{site.kic_product_name}} as the traffic profile changes. diff --git a/app/kubernetes-ingress-controller/1.3.x/concepts/ingress-classes.md b/app/kubernetes-ingress-controller/1.3.x/concepts/ingress-classes.md deleted file mode 100644 index 85790f0dbd66..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/concepts/ingress-classes.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Kong Ingress Controller and Ingress Class ---- - -## Introduction - -The {{site.kic_product_name}} uses ingress classes to filter Kubernetes Ingress -objects and other resources before converting them into Kong configuration. -This allows it to coexist with other ingress controllers and/or other -deployments of the {{site.kic_product_name}} in the same cluster: a -{{site.kic_product_name}} will only process configuration marked for its use. - -## Configuring the controller ingress class - -The `--ingress-class` flag (or `CONTROLLER_INGRESS_CLASS` environment variable) -specify the ingress class expected by the {{site.kic_product_name}}. By default, -it expects the `kong` class. - -## Loading resources by class - -The {{site.kic_product_name}} translates a variety of Kubernetes resources into -Kong configuration. Broadly speaking, we can separate these resources into two -categories: - -- Resources that the controller translates directly into Kong configuration. -- Resources referenced by some other resource, where the other resource is - directly translated into Kong configuration. - -For example, an Ingress is translated directly into a Kong route, and a -KongConsumer is translated directly into a -[Kong consumer](/gateway/api/admin-ee/latest/#/Consumers/list-consumer/). A Secret containing -an authentication plugin credential is _not_ translated directly: it is only -translated into Kong configuration if a KongConsumer resource references it. 
- -Because they create Kong configuration independent of any other resources, -directly-translated resources require an ingress class, and their class must -match the class configured for the controller. Referenced resources do not -require a class, but must be referenced by a directly-translated resource -that matches the controller. - -### Adding class information to resources - -Most resources use a [kubernetes.io/ingress-class annotation][class-annotation] -to indicate their class. There are several exceptions: - -- v1 Ingress resources have a dedicated `class` field. -- Knative Services [use the class specified][knative-class] by the - `ingress.class` key of the Knative installation's `config-network` ConfigMap. - You can optionally [override this on a per-Service basis][knative-override] - by adding a `networking.knative.dev/ingress.class` annotation to the Service. - -### Enabling support for classless resources - -Specifying a class is optional for some resources. Although specifying a class -is recommended, you can instruct the controller to process resources without a -class annotation using flags: - -- `--process-classless-ingress-v1beta1` instructs the controller to translate - v1beta1 Ingress resources with no class annotation. -- `--process-classless-kong-consumer` instructs the controller to translate - KongConsumer resources with no class annotation. - -These flags are primarily intended for compatibility with older configuration -({{site.kic_product_name}} before 0.10 had less strict class -requirements, and it was common to omit class annotations). If you are creating -new configuration and do not have older configuration without class -annotations, recommended best practice is to add class information to Ingress -and KongConsumer resources and not set the above flags. Doing so avoids -accidentally creating duplicate configuration in other ingress controller -instances. - -These flags do not _ignore_ `ingress.class` annotations: they allow resources -with no such annotation, but will not allow resource that have a non-matching -`ingress.class` annotation. - -## When to use a custom class - -Using the default `kong` class is fine for simpler deployments, where only one -{{site.kic_product_name}} instance is running in a cluster. Changing the class is -typical when: - -- You install multiple Kong environments in one Kubernetes cluster to handle - different types of ingress traffic, e.g. when using separate Kong instances - to handle traffic on internal and external load balancers, or deploying - different types of non-production environments in a single test cluster. -- You install multiple controller instances alongside a single Kong cluster to - separate configuration into different Kong workspaces (using the - `--kong-workspace` flag) or to restrict which Kubernetes namespaces any one - controller instance has access to. - -## Legacy behavior - -This overview covers behavior in {{site.kic_product_name}} version 0.10.0 onward. -Earlier versions had a special case for the default class and a bug affecting -custom classes: - -- When using the default `kong` class, the controller would always process - classless resources in addition to `kong`-class resources. When using a - non-default controller class, the controller would only process resources - with that class, not classless resources. Although this was by design, it was - a source of user confusion. 
-- When using a custom controller class, some resources that should not have - required a class (because they were referenced by other resources) - effectively did require a class: while these resources were loaded initially, - the controller would not track updates to them unless they had a class - annotation. - -In versions 0.10.0+ you must instruct the controller to load classless -resources, which is allowed (but not recommended) for either the default or -custom classes. Resources referenced by another resource are always loaded and -updated correctly regardless of which class you set on the controller; you do -not need to add class annotations to these resources when using a custom class. - -## Examples - -Typical configurations will include a mix of resources that have class -information and resources that are referenced by them. For example, consider -the following configuration for authenticating a request, using a KongConsumer, -credential Secret, Ingress, and KongPlugin (a Service is implied, but not -shown): - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: dyadya-styopa - annotations: - kubernetes.io/ingress.class: "kong" -username: styopa -credentials: -- styopa-key - ---- - -kind: Secret -apiVersion: v1 -stringData: - key: bylkogdatomoryakom - kongCredType: key-auth -metadata: - name: styopa-key - ---- - -kind: Ingress -apiVersion: extensions/v1beta1 -metadata: - name: ktonezhnaet - annotations: - kubernetes.io/ingress.class: "kong" - konghq.com/plugins: "key-auth-example" -spec: - rules: - - http: - paths: - - path: /vsemznakom - backend: - serviceName: httpbin - servicePort: 80 - ---- - -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: key-auth-example -plugin: key-auth -``` - -The KongConsumer and Ingress resources both have class annotations, as they are -resources that the controller uses as a basis for building Kong configuration. -The Secret and KongPlugin _do not_ have class annotations, as they are -referenced by other resources that do. - -[class-annotation]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#kubernetesioingressclass -[knative-class]: /kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/#ingress-class -[knative-override]: https://knative.dev/docs/serving/services/ingress-class/ diff --git a/app/kubernetes-ingress-controller/1.3.x/concepts/ingress-versions.md b/app/kubernetes-ingress-controller/1.3.x/concepts/ingress-versions.md deleted file mode 100644 index 6140d472d5df..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/concepts/ingress-versions.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Ingress v1 and v1beta1 Differences ---- - -## Introduction - -Kubernetes 1.19 introduced a new `networking.k8s.io/v1` API for the [Ingress resource][kubernetes-ingress-doc]. -It standardizes common practices and clarifies implementation requirements that -were previously up to individual controller vendors. This document covers those -changes as they relate to {{site.kic_product_name}} and provides sample -equivalent `networking.k8s.io/v1beta1` and `networking.k8s.io/v1` resources for comparison. - -## Paths - -Both Ingress v1beta1 and v1 HTTP rules require a path, which represents a [URI -path][uri-rfc-paths]. Although v1beta1 had specified that paths were [POSIX -regular expressions][posix-regex] and enforced this, in practice most -controllers used other implementations that did not match the -specification. 
v1 seeks to reduce confusion by introducing several [path -types][path-types] and lifting restrictions on regular expression grammars used -by controllers. - -### networking.k8s.io/v1beta1 - -The controller passes paths directly to Kong and relies on its [path handling -logic][kong-paths]. The Kong proxy treats paths as a prefix unless they include -characters [not allowed in RFC 3986 paths][uri-rfc-paths], in which case the -proxy assumes they are a regular expression, and does not treat slashes as -special characters. For example, the prefix `/foo` can match any of the -following: - -``` -/foo -/foo/ -/foobar -/foo/bar -``` - -### networking.k8s.io/v1 - -Although v1 Ingresses provide path types with more clearly-defined logic, the -controller must still create Kong routes and work within the Kong proxy's -routing logic. As such, the controller translates Ingress rule paths to create -Kong routes that match one of the following specifications: `Exact`, `Prefix`, or `ImplementationSpecific`. - -#### Exact - -If `pathType` is `Exact`, the controller creates a Kong route with a regular -expression that matches the rule path only. For example, an exact rule for `/foo` in an -Ingress translates to a Kong route with a `/foo$` regular expression path. - -#### Prefix - -If `pathType` is `Prefix`, the controller creates a Kong route with two path -criteria. For example, `/foo` will create a route with a `/foo$` regular expression and -`/foo/` plain path. - -#### ImplementationSpecific - -The controller leaves `ImplementationSpecific` path rules entirely up to the Kong -router. It creates a route with the exact same path string as the Ingress rule. - -{:.important} -> Both `Prefix` and `Exact` paths modify the paths you - provide, and those modifications may interfere with user-provided regular - expressions. If you are using your own regular expressions in paths, use - `ImplementationSpecific` to avoid unexpected behavior. - -## Ingress class - -[Ingress class][ingress-class] indicates which resources an ingress controller -should process. It provides a means to separate out configuration intended for -other controllers or other instances of the {{site.kic_product_name}}. - -In v1beta1, ingress class was handled informally using -`kubernetes.io/ingress.class` [annotations][deprecated-annotation]. v1 -introduces a new [IngressClass resource][ingress-class-api] which provides -richer information about the controller. v1 Ingresses are bound to a class via -their `ingressClassName` field. - -For example, consider this v1beta1 Ingress: - -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: example-ingress - annotations: - kubernetes.io/ingress.class: "kong" -spec: - rules: - - host: example.com - http: - paths: - - path: /test - backend: - serviceName: echo - servicePort: 80 -``` - -Its ingress class annotation is set to `kong`, and ingress controllers set to -process `kong` class Ingresses will process it. - -In v1, the equivalent configuration declares a `kong` IngressClass resource -whose `metadata.name` field indicates the class name. 
The `ingressClassName` -value of the Ingress object must match the value of the `name` field in the -IngressClass metadata: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - name: kong -spec: - controller: ingress-controllers.konghq.com/kong ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: example-ingress -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /testpath - pathType: Prefix - backend: - service: - name: test - port: - number: 80 -``` - -## Hostnames - -Ingress v1 formally codifies support for [wildcard hostnames][wildcard-hostnames]. -v1beta1 Ingresses did not reject wildcard hostnames, however, and Kong had -[existing support for them][kong-wildcard-hostnames]. - -As such, while the v1beta1 specification did not officially support wildcard -hostnames, you can use wildcard hostnames with either version. Setting a -hostname like `*.example.com` will match requests for both `foo.example.com` -and `bar.example.com` with either v1 or v1beta1 Ingresses. - -## Backend types - -Ingress v1 introduces support for backends other than Kubernetes Services through -[resource backends][resource-backends]. - -Kong does not support any dedicated resource backend configurations, though it -does have support for Routes without Services in some cases (for example, when -using the [AWS Lambda plugin][lambda-plugin]). For these routes, you should -create a placeholder Kubernetes Service for them, using an [ExternalName -Service][external-name] with an RFC 2606 invalid hostname, e.g. -`kong.invalid`. You can use these placeholder services with either v1 or -v1beta1 Ingresses. - -[kubernetes-ingress-doc]: https://kubernetes.io/docs/concepts/services-networking/ingress/ -[ingress-class]: /kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes -[uri-rfc-paths]: https://tools.ietf.org/html/rfc3986#section-3.3 -[posix-regex]: https://www.boost.org/doc/libs/1_38_0/libs/regex/doc/html/boost_regex/syntax/basic_extended.html -[path-types]: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types -[kong-paths]: /gateway/latest/reference/proxy/#request-path -[wildcard-hostnames]: https://kubernetes.io/docs/concepts/services-networking/ingress/#hostname-wildcards -[kong-wildcard-hostnames]: /gateway/latest/reference/proxy/#using-wildcard-hostnames -[resource-backends]: https://kubernetes.io/docs/concepts/services-networking/ingress/#resource-backend -[lambda-plugin]: /hub/kong-inc/aws-lambda/ -[external-name]: https://kubernetes.io/docs/concepts/services-networking/service/#externalname -[deprecated-annotation]: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation -[ingress-class-api]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-class-v1/ diff --git a/app/kubernetes-ingress-controller/1.3.x/concepts/k4k8s-with-kong-enterprise.md b/app/kubernetes-ingress-controller/1.3.x/concepts/k4k8s-with-kong-enterprise.md deleted file mode 100644 index 4331901f398f..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/concepts/k4k8s-with-kong-enterprise.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -Kong for Kubernetes is a {{site.kic_product_name}} built on top -of Open-Source {{site.base_gateway}}. 
- -If you are an Enterprise customer, you have an option of running the -[Enterprise version](/gateway/latest/install-and-run/kubernetes/) -of the Ingress Controller, which includes -all the Enterprise plugins but does not include Kong Manager or any -other Enterprise features. This makes it possible to -run the Ingress layer without a database, providing a very low -operational and maintenance footprint. - -However, in some cases, those enterprise features are necessary, -and for such use-cases we support another deployment - Kong for -Kubernetes with {{site.ee_product_name}}. - -As seen in the diagram below, this deployment consists of -Kong for Kubernetes deployed in Kubernetes, and is hooked up with -a database. If there are services running outside Kubernetes, -a regular {{site.base_gateway}} proxy can be deployed there and connected to the -same database. This provides a single pane of visibility of -all services that are running in your infrastructure. - -![architecture-overview](/assets/images/products/kubernetes-ingress-controller/k4k8s-with-kong-enterprise.png "K4K8S with {{site.ee_product_name}}") - -In this deployment model, the database for Kong can be hosted anywhere. -It can be a managed DBaaS service like Amazon RDS, Google Cloud -SQL or a Postgres instance managed in-house or even an instance -deployed on Kubernetes. -If you are following this model, please keep in mind the following: -- It is recommended to not deploy Postgres on Kubernetes, - due to the fact that running stateful applications on Kubernetes - is challenging to get right. -- Ensure that you have the same image/package of {{site.ee_product_name}} - running across the fleet. This means that all Kong instances that are - connected to the same database must use the - same version of {{site.ee_product_name}} package. - -[This guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise) -walks through the setup of the above architecture. diff --git a/app/kubernetes-ingress-controller/1.3.x/concepts/security.md b/app/kubernetes-ingress-controller/1.3.x/concepts/security.md deleted file mode 100644 index bbb08cd9073b..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/concepts/security.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Security ---- - -This document explains the security aspects of the {{site.kic_product_name}}. - -The {{site.kic_product_name}} communicates with Kubernetes API-server and Kong's -Admin API. APIs on both sides offer authentication/authorization features -and the controller integrates with them gracefully. - -## Kubernetes RBAC - -The {{site.kic_product_name}} is deployed with RBAC permissions as explained in the -[deployment](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment) document. -It has read and list permissions on most resources but requires update -and create permission for a few resources to provide seamless integration. -The permissions can be locked down further if needed depending on the specific -use-case. -This RBAC policy is associated with a ServiceAccount and the ServiceAccount -is associated with the {{site.kic_product_name}}. -The Controller uses the ServiceAccount credential to authenticate and -authorize itself against the Kubernetes API-server. - -## Kong Admin API Protection - -Kong's Admin API is used to control configuration of Kong and proxying behavior. 
-If an attacker happens to gain access to Kong's Admin API, they -will be able to perform all actions as an authorized user like -modifying or deleting Kong's configuration. -Hence, it is important that the deployment -ensures that the likelihood of this happening is as small as possible. - -In the example deployments, the Controller and Kong's Admin API communicate -over the loopback (`lo`) interface of the pod. -Kong is not performing any kind of authorization or -authentication on the Admin API, hence the API is accessible only -on the loopback interface to limit the attack surface. -Although not ideal, this setup requires fewer steps -to get started and can be further hardened as required. - -Please note that it is very important that Kong's Admin API is not accessible -inside the cluster as any malicious service can change Kong's configuration. -If you're exposing Kong's Admin API itself outside the cluster, please ensure -that you have the necessary authentication in place first. - -### Authentication on Kong's Admin API - -If Kong's Admin API is protected with one of the authentication plugins, -the Controller can authenticate itself against it to add another layer of -security. -The Controller comes with support for injecting arbitrary HTTP headers -in the requests it makes to Kong's Admin API, which can be used to inject -authentication credentials. -The headers can be specified using the CLI flag `--kong-admin-header` in the Ingress -Controller. - -The Ingress Controller will support mutual-TLS-based authentication on Kong's Admin -API in future. - -### {{site.ee_product_name}} RBAC - -{{site.ee_product_name}} comes with support for authentication and authorization on -Kong's Admin API. - -Once an RBAC token is provisioned, the {{site.kic_product_name}} can use the RBAC -token to authenticate against {{site.ee_product_name}}. Use the `--kong-admin-header` CLI -flag to pass the RBAC token the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/admission-webhook.md b/app/kubernetes-ingress-controller/1.3.x/deployment/admission-webhook.md deleted file mode 100644 index 23ee8d5da9fb..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/admission-webhook.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Validating Admission Controller ---- - -The {{site.kic_product_name}} ships with an Admission Controller for KongPlugin -and KongConsumer resources in the `configuration.konghq.com` API group. - -The Admission Controller needs a TLS certificate and key pair which -you need to generate as part of the deployment. - -Following guide walks through a setup of how to create the required key-pair -and enable the admission controller. - -Please note that this requires {{site.kic_product_name}} >= 0.6 to be -already installed in the cluster. - -## tldr; - -If you are using the stock YAML manifests to install and setup Kong for -Kubernetes, then you can setup the admission webhook using a single command: - -```bash -curl -sL https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/hack/deploy-admission-controller.sh | bash - -``` - -This script takes all the following commands and packs them together. -You need `kubectl` and `openssl` installed on your workstation for this to -work. - -## Create a certificate for the admission controller - -Kuberentes API-server makes an HTTPS call to the Admission Controller to verify -if the custom resource is valid or not. 
For this to work, Kubernetes API-server -needs to trust the CA certificate that is used to sign Admission Controller's -TLS certificate. - -This can be accomplished either using a self-signed certificate or using -Kubernetes CA. Follow one of the steps below and then go to -[Create the secret](#create-the-secret) step below. - -Please note the `CN` field of the x509 certificate takes the form -`..svc`, which -in the default case is `kong-validation-webhook.kong.svc`. - -### Using self-signed certificate - -Use openssl to generate a self-signed certificate: - -```bash -$ openssl req -x509 -newkey rsa:2048 -keyout tls.key -out tls.crt -days 365 \ - -nodes -subj "/CN=kong-validation-webhook.kong.svc" \ - -extensions EXT -config <( \ - printf "[dn]\nCN=kong-validation-webhook.kong.svc\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:kong-validation-webhook.kong.svc\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") -Generating a 2048 bit RSA private key -..........................................................+++ -.............+++ -writing new private key to 'key.pem' -``` - -### Using in-built Kubernetes CA - -Kubernetes comes with an in-built CA which can be used to provision -a certificate for the Admission Controller. -Please refer to the -[this guide](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/) -on how to generate a certificate using the in-built CA. - -### Create the secret - -Next, create a Kubernetes secret object based on the key and certificate that -was generatd in the previous steps. -Here, we assume that the PEM-encoded certificate is stored in a file named -`tls.crt` and private key is stored in `tls.key`. - -```bash -$ kubectl create secret tls kong-validation-webhook -n kong \ - --key tls.key --cert tls.crt -secret/kong-validation-webhook created -``` - -## Update the deployment - -Once the secret is created, update the Ingress Controller deployment: - -Execute the following command to patch the {{site.kic_product_name}} deployment -to mount the certificate and key pair and also enable the admission controller: - -```bash -$ kubectl patch deploy -n kong ingress-kong \ - -p '{"spec":{"template":{"spec":{"containers":[{"name":"ingress-controller","env":[{"name":"CONTROLLER_ADMISSION_WEBHOOK_LISTEN","value":":8080"}],"volumeMounts":[{"name":"validation-webhook","mountPath":"/admission-webhook"}]}],"volumes":[{"secret":{"secretName":"kong-validation-webhook"},"name":"validation-webhook"}]}}}}' -deployment.extensions/ingress-kong patched -``` - -## Enable the validating admission - -If you are using Kubernetes CA to generate the certificate, you don't need -to supply a CA certificate (in the `caBunde` param) -as part of the Validation Webhook configuration -as the API-server already trusts the internal CA. 
- -```bash -$ echo "apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: kong-validations -webhooks: -- name: validations.kong.konghq.com - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: ["v1beta1"] - rules: - - apiGroups: - - configuration.konghq.com - apiVersions: - - '*' - operations: - - CREATE - - UPDATE - resources: - - kongconsumers - - kongplugins - - apiGroups: - - '' - apiVersions: - - 'v1' - operations: - - CREATE - - UPDATE - resources: - - secrets - clientConfig: - service: - namespace: kong - name: kong-validation-webhook - caBundle: $(cat tls.crt | base64 -w 0) " | kubectl apply -f - -``` - -## Verify if it works - -### Verify duplicate KongConsumers - -Create a KongConsumer with username as `harry`: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, create another KongConsumer with the same username: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry2 - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: consumer already exists -``` - -The validation webhook rejected the KongConsumer resource as there already -exists a consumer in Kong with the same username. - -### Verify incorrect KongPlugins - -Try to create the folowing KongPlugin resource. -The `foo` config property does not exist in the configuration definition and -hence the Admission Controller returns back an error. -If you remove the `foo: bar` configuration line, the plugin will be -created succesfully. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - foo: bar - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: 400 Bad Request {"fields":{"config":{"foo":"unknown field"}},"name":"schema violation","code":2,"message":"schema violation (config.foo: unknown field)"} -``` - -### Verify incorrect credential secrets - -With 0.7 and above versions of the controller, validations also take place -for incorrect secret types and wrong parameters to the secrets: - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=basic-auth \ - --from-literal=username=foo -Error from server: admission webhook "validations.kong.konghq.com" denied the request: missing required field(s): password -``` - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=wrong-auth \ - --from-literal=sdfkey=my-sooper-secret-key -Error from server: admission webhook "validations.kong.konghq.com" denied the request: invalid credential type: wrong-auth -``` diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/aks.md b/app/kubernetes-ingress-controller/1.3.x/deployment/aks.md deleted file mode 100644 index 1c1aadf165d2..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/aks.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kong Ingress on Azure Kubernetes Service (AKS) ---- - -## Requirements - -1. A fully functional AKS cluster. 
- Please follow Azure's Guide to - [set up an AKS cluster](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the AKS Kubernetes - cluster we will work on. The above AKS setup guide will help - you set this up. - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It will take a few minutes for all containers to start and report -healthy status. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Set up environment variables - -Next, set up an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's set up an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Microsoft Azure to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/eks.md b/app/kubernetes-ingress-controller/1.3.x/deployment/eks.md deleted file mode 100644 index 209a750f4edf..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/eks.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Kong Ingress on Elastic Kubernetes Service (EKS) ---- - -## Requirements - -1. A fully functional EKS cluster. - Please follow Amazon's Guide to - [set up an EKS cluster](https://aws.amazon.com/getting-started/projects/deploy-kubernetes-app-amazon-eks/). -2. Basic understanding of Kubernetes -3. A working `kubectl` linked to the EKS Kubernetes - cluster we will work on. The above EKS setup guide will help - you set this up. 
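-
-For reference, one way to point `kubectl` at an existing EKS cluster (assuming
-the AWS CLI is installed and configured, and substituting your own cluster
-name and region) is:
-
-```bash
-# write or update the kubeconfig entry for the cluster
-$ aws eks update-kubeconfig --region eu-west-1 --name my-eks-cluster
-
-# confirm connectivity
-$ kubectl get nodes
-```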
- -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It may take a few minutes for all containers to start and report -healthy statuses. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, create an environment variable with the IP address at which -Kong is accessible. This IP address sends requests to the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 example.eu-west-1.elb.amazonaws.com 80:31929/TCP,443:31408/TCP 57d -``` - -Create an environment variable to hold the ELB hostname: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].hostname}" service -n kong kong-proxy) -``` - -> Note: It may take some time for Amazon to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## TLS configuration - -Versions of Kong prior to 2.0.0 default to using [the "modern" cipher suite -list](https://wiki.mozilla.org/Security/Server_Side_TLS). This is not -compatible with ELBs when the ELB terminates TLS at the edge and establishes a -new session with Kong. This error will appear in Kong's logs: - -``` -*7961 SSL_do_handshake() failed (SSL: error:1417A0C1:SSL routines:tls_post_process_client_hello:no shared cipher) while SSL handshaking -``` - -To correct this issue, set `KONG_SSL_CIPHER_SUITE=intermediate` in your -environment variables. diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/gke.md b/app/kubernetes-ingress-controller/1.3.x/deployment/gke.md deleted file mode 100644 index 52de2a69d458..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/gke.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Kong Ingress on Google Kubernetes Engine (GKE) ---- - -## Requirements - -1. A fully functional GKE cluster. 
- The easiest way to do this is to do it via the web UI: - Go to Google Cloud's console > Kubernetes Engine > Cluster > - Create a new cluster. - This documentation has been tested on a zonal cluster in - europe-west-4a using 1.10.5-gke.4 as Master version. - The default pool has been assigned 2 nodes of kind 1VCPU - with 3.75GB memory (default setting). - The OS used is COS (Container Optimized OS) and the auto-scaling - has been enabled. Default settings are being used except for - `HTTP load balancing` which has been disabled (you probably want to use - Kong features for this). For more information on GKE clusters, - refer to - [the GKE documentation](https://cloud.google.com/kubernetes-engine/docs/). -1. If you wish to use a static IP for Kong, you have to reserve a static IP - address (in Google Cloud's console > VPC network > - External IP addresses). For information, - you must create a regional IP - global is not supported as `loadBalancerIP` yet) -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the GKE Kubernetes - cluster we will work on. For information, you can associate a new `kubectl` - context by using: - - ```bash - gcloud container clusters get-credentials --zone --project - ``` - -## Update User Permissions - -> Because of [the way Kubernetes Engine checks permissions -when you create a Role or ClusterRole](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control), you must -first create a RoleBinding that grants you all of -the permissions included in the role you want to create. -An example workaround is to create a RoleBinding that -gives your Google identity a cluster-admin role -before attempting to create additional Role or -ClusterRole permissions. -This is a known issue in RBAC in Kubernetes and -Kubernetes Engine versions 1.6 and -later. - -A fast workaround: - -```yaml - -echo -n " -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: cluster-admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: User - name: # usually the Google account - # e.g.: example@testorg.com - namespace: kube-system" | kubectl apply -f - - -``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. 
-Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Google to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/k4k8s-enterprise.md b/app/kubernetes-ingress-controller/1.3.x/deployment/k4k8s-enterprise.md deleted file mode 100644 index c81b8ab1718e..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/k4k8s-enterprise.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Kong for Kubernetes Enterprise ---- - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. - -## Prerequisites - -Before we can deploy Kong, we need to satisfy one prerequisite: create a license -secret. - -To create this secret, provision the `kong` namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -Enterprise version requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -If you do not have one, please contact your sales representative. -Save the license file temporarily to disk with filename `license` -and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -## Installers - -Once the secret is in-place, we can proceed with installation. - -Kong for Kubernetes can be installed using an installer of -your choice: - -### YAML manifests - -Execute the following to install Kong for Kubernetes Enterprise using YAML -manifests: - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless-k4k8s-enterprise.yaml -``` - -It takes a few minutes the first time this setup is done. 
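While you wait, you can block until the controller reports ready instead of
polling by hand. The following sketch assumes the Deployment created by the
manifest above keeps its default name `ingress-kong` in the `kong` namespace:

```bash
# Wait up to five minutes for the Deployment to become Available
$ kubectl wait --namespace kong \
    --for=condition=Available deployment/ingress-kong \
    --timeout=300s
```

Once the wait completes, the pod listing should show the controller running: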
- -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-6ffcf8c447-5qv6z 2/2 Running 1 44m -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.254.78 35.233.198.16 80:32697/TCP,443:32365/TCP 22h -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP address assigned to the service. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for your cloud provider to actually associate the -IP address to the `kong-proxy` Service. - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes Enterprise: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/config/variants/enterprise -``` - -You can use the above URL as a base kustomization and build on top of it -as well. - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name - --namespace kong \ - -f values.yaml \ - --set ingressController.installCRDs=false -``` - -### Example values.yaml -``` -image: - repository: kong/kong-gateway - tag: 2.2.1.0-alpine -env: - LICENSE_DATA: - valueFrom: - secretKeyRef: - name: kong-enterprise-license - key: license -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes Enterprise - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/k4k8s.md b/app/kubernetes-ingress-controller/1.3.x/deployment/k4k8s.md deleted file mode 100644 index 1e66edc96a0c..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/k4k8s.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kong for Kubernetes ---- - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -## Installers - -Kong for Kubernetes can be installed using an installer of -your choice. 
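Whichever installer you pick, a quick post-install sanity check is to confirm
that the controller Pod and the `kong-proxy` Service exist. The following
sketch assumes the default `kong` namespace used throughout this guide:

```bash
# The controller Pod should be Running and the proxy Service should be present
$ kubectl get pods -n kong
$ kubectl get service kong-proxy -n kong
# If you installed with the Helm chart, the Service may instead be named
# "<release-name>-kong-proxy" in the release's namespace.
```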
- -Once you've installed Kong for Kubernetes, -jump to the [next section](#using-kong-for-kubernetes) -on using it. - -### YAML manifests - -Please pick one of the following guides depending on your platform: - -- [Minikube](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/) -- [Google Kubernetes Engine(GKE) by Google](/kubernetes-ingress-controller/{{page.release}}/deployment/gke/) -- [Elastic Kubernetes Service(EKS) by Amazon](/kubernetes-ingress-controller/{{page.release}}/deployment/eks/) -- [Azure Kubernetes Service(AKS) by Microsoft](/kubernetes-ingress-controller/{{page.release}}/deployment/aks/) - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/config/base -``` - -You can use the above URL as a base kustomization and build on top of it -to make it suite better for your cluster and use-case. - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes - -Once you've installed Kong for Kubernetes, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/kong-enterprise.md b/app/kubernetes-ingress-controller/1.3.x/deployment/kong-enterprise.md deleted file mode 100644 index e2f7648a283c..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/kong-enterprise.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -This guide walks through setting up the {{site.kic_product_name}} using Kong -Enterprise. This architecture is described in detail in [this doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise/). - -We assume that we start from scratch and you don't have {{site.ee_product_name}} -deployed. For the sake of simplicity, we will deploy {{site.ee_product_name}} and -its database in Kubernetes itself. You can safely run them outside -Kubernetes as well. 
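If you do choose to run the database outside the cluster, Kong can be pointed
at it through its standard `KONG_PG_*` settings instead of the in-cluster
Postgres that the manifest in the Install step below ships with. The following
is a rough sketch only, to be run after that manifest has created the
`ingress-kong` Deployment; the hostname and credentials are placeholders, and
in practice the password should come from a Secret rather than being set
inline:

```bash
# Point the proxy container at an external Postgres (placeholder values)
$ kubectl set env deployment/ingress-kong -n kong -c proxy \
    KONG_DATABASE=postgres \
    KONG_PG_HOST=postgres.example.internal \
    KONG_PG_PORT=5432 \
    KONG_PG_USER=kong \
    KONG_PG_PASSWORD=change-me
```

Keep in mind that the migrations Job included in the manifest also needs to
run against the same external database.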
- -## Prerequisites - -Before we can deploy the {{site.kic_product_name}} with {{site.ee_product_name}}, -we need to satisfy the following prerequisites: -- [{{site.ee_product_name}} License secret](#kong-enterprise-license-secret) -- [{{site.ee_product_name}} bootstrap password](#kong-enterprise-bootstrap-password) - -In order to create these secrets, let's provision the `kong` -namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -{{site.ee_product_name}} requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -Save the license file temporarily to disk and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -### {{site.ee_product_name}} bootstrap password - -Next, we need to create a secret containing the password using which we can login into Kong Manager. -Please replace `cloudnative` with a random password of your choice and note it down. - -```bash -$ kubectl create secret generic kong-enterprise-superuser-password -n kong --from-literal=password=cloudnative -secret/kong-enterprise-superuser-password created -``` - -Once these are created, we are ready to deploy {{site.ee_product_name}} -Ingress Controller. - -## Install - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/single/all-in-one-postgres-enterprise.yaml -``` - -It takes a little while to bootstrap the database. -Once bootstrapped, you should see the {{site.kic_product_name}} running with -{{site.ee_product_name}} as its core: - -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-548b9cff98-n44zj 2/2 Running 0 21s -kong-migrations-pzrzz 0/1 Completed 0 4m3s -postgres-0 1/1 Running 0 4m3s -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-admin LoadBalancer 10.63.255.85 34.83.95.105 80:30574/TCP 4m35s -kong-manager LoadBalancer 10.63.247.16 34.83.242.237 80:31045/TCP 4m34s -kong-proxy LoadBalancer 10.63.242.31 35.230.122.13 80:32006/TCP,443:32007/TCP 4m34s -kong-validation-webhook ClusterIP 10.63.240.154 443/TCP 4m34s -postgres ClusterIP 10.63.241.104 5432/TCP 4m34s - -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP assigned to the three LoadBalancer type services. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. If you are running Minikube, you will not get an -external IP address. - -### Setup Kong Manager - -Next, if you browse to the IP address or host of the `kong-manager` service in your Browser, -which in our case is `http://34.83.242.237`. -Kong Manager should load in your browser. -Try logging in to the Manager with the username `kong_admin` -and the password you supplied in the prerequisite, it should fail. -The reason being we've not yet told Kong Manager where it can find the Admin API. - -Let's set that up. 
We will take the External IP address of `kong-admin` service and -set the environment variable `KONG_ADMIN_API_URI`: - -```bash -KONG_ADMIN_IP=$(kubectl get svc -n kong kong-admin --output=jsonpath='{.status.loadBalancer.ingress[0].ip}') -kubectl patch deployment -n kong ingress-kong -p "{\"spec\": { \"template\" : { \"spec\" : {\"containers\":[{\"name\":\"proxy\",\"env\": [{ \"name\" : \"KONG_ADMIN_API_URI\", \"value\": \"${KONG_ADMIN_IP}\" }]}]}}}}" -``` - -It will take a few minutes to roll out the updated deployment and once the new -`ingress-kong` pod is up and running, you should be able to log into the Kong Manager UI. - -As you follow along with other guides on how to use your newly deployed the {{site.kic_product_name}}, -you will be able to browse Kong Manager and see changes reflected in the UI as Kong's -configuration changes. - -## Using Kong for Kubernetes with {{site.ee_product_name}} - -Let's setup an environment variable to hold the IP address of `kong-proxy` service: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. - -## Customizing by use-case - -The deployment in this guide is a point to start using Ingress Controller. -Based on your existing architecture, this deployment will require custom -work to make sure that it needs all of your requirements. - -In this guide, there are three load-balancers deployed for each of -Kong Proxy, Kong Admin and Kong Manager services. It is possible and -recommended to instead have a single Load balancer and then use DNS names -and Ingress resources to expose the Admin and Manager services outside -the cluster. diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/minikube.md b/app/kubernetes-ingress-controller/1.3.x/deployment/minikube.md deleted file mode 100644 index d56a366bb0bd..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/minikube.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Kong Ingress on Minikube ---- - -## Setup Minikube - -1. Install [`minikube`](https://github.com/kubernetes/minikube) - - Minikube is a tool that makes it easy to run Kubernetes locally. - Minikube runs a single-node Kubernetes cluster inside a VM on your laptop - for users looking to try out Kubernetes or develop with it day-to-day. - -1. Start `minikube` - - ```bash - minikube start - ``` - - It will take a few minutes to get all resources provisioned. 
- - ```bash - kubectl get nodes - ``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. -Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -> Note: this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -```bash -$ export PROXY_IP=$(minikube service -n kong kong-proxy --url | head -1) -# If installed by helm, service name would be "-kong-proxy". -# $ export PROXY_IP=$(minikube service -kong-proxy --url | head -1) -$ echo $PROXY_IP -http://192.168.99.100:32728 -``` - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/1.3.x/deployment/overview.md b/app/kubernetes-ingress-controller/1.3.x/deployment/overview.md deleted file mode 100644 index 5a37579b5a99..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/deployment/overview.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Installing and Configuring ---- - -## Getting started - -If you are getting started with Kong for Kubernetes, -install it on Minikube using our Minikube [setup guide](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/). - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## Overview - -The {{site.kic_product_name}} can be installed on a local, managed -or any Kubernetes cluster which supports a service of type `LoadBalancer`. - -As explained in the [deployment document](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/), there -are a variety of configurations and runtimes for the {{site.kic_product_name}}. - -The following sections detail on deployment steps for all the different -runtimes: - -## Kong for Kubernetes - - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. 
It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s/) to deploy Kong for Kubernetes -using an installation method of your choice. - -## Kong for Kubernetes Enterprise - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) to deploy Kong for Kubernetes -Enterprise if you have purchased or are trying out {{site.ee_product_name}}. - -## Kong for Kubernetes with {{site.ee_product_name}} - -Kong for Kubernetes can integrate with {{site.ee_product_name}} to -provide a single pane of visibility across all of your services -that are running in Kubernetes and non-Kubernetes environments. - -This [guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise/) goes into details of -the architecture and how one can set that up. - -## Admission Controller - -The {{site.kic_product_name}} also ships with a Validating -Admission Controller that -can be enabled to verify KongConsumer, KongPlugin and Secret -resources as they are created. -Please follow the [admission-webhook](/kubernetes-ingress-controller/{{page.release}}/deployment/admission-webhook/) deployment -guide to set it up. diff --git a/app/kubernetes-ingress-controller/1.3.x/faq.md b/app/kubernetes-ingress-controller/1.3.x/faq.md deleted file mode 100644 index 8f401a8b6e3c..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/faq.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: FAQs ---- - -### Why endpoints and not services? - -The {{site.kic_product_name}} does not use -[Services][k8s-service] to route traffic -to the pods. Instead, it uses the Endpoints API -to bypass [kube-proxy][kube-proxy] -to allow Kong features like session affinity and -custom load balancing algorithms. -It also removes overhead -such as conntrack entries for iptables DNAT. - -### Is it possible to create consumers using the Admin API? - -From version 0.5.0 onwards, the {{site.kic_product_name}} tags each entity -that it manages inside Kong's database and only manages the entities that -it creates. -This means that if consumers and credentials are created dynamically, they -won't be deleted by the Ingress Controller. - -[k8s-service]: https://kubernetes.io/docs/concepts/services-networking/service -[kube-proxy]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/cert-manager.md b/app/kubernetes-ingress-controller/1.3.x/guides/cert-manager.md deleted file mode 100644 index 471b4b29189c..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/cert-manager.md +++ /dev/null @@ -1,372 +0,0 @@ ---- -title: Using cert-manager for automated TLS certificate ---- - -This guide will walk through steps to set up the {{site.kic_product_name}} with -cert-manager to automate certificate management using Let's Encrypt. -Any ACME-based CA can be used in-place of Let's Encrypt as well. - -## Before you begin - -You will need the following: - -- Kubernetes cluster that can provision an IP address that is routable from - the Internet. 
If you don't have one, you can use GKE or any managed k8s - cloud offering. -- A domain name for which you control the DNS records. - This is necessary so that - Let's Encrypt can verify the ownership of the domain and issue a certificate. - In the current guide, we use `example.com`, please replace this with a domain - you control. - -This tutorial was written using Google Kubernetes Engine. - -## Set up the {{site.kic_product_name}} {#set-up-kic} - -Execute the following to install the Ingress Controller: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.example.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -## Set up cert-manager - -Please follow cert-manager's [documentation](https://cert-manager.io/docs/installation/) -on how to install cert-manager onto your cluster. - -Once installed, verify all the components are running using: - -```bash -kubectl get all -n cert-manager -NAME READY STATUS RESTARTS AGE -pod/cert-manager-86478c5ff-mkhb9 1/1 Running 0 23m -pod/cert-manager-cainjector-65dbccb8b6-6dnjl 1/1 Running 0 23m -pod/cert-manager-webhook-78f9d55fdf-5wcnp 1/1 Running 0 23m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/cert-manager-webhook ClusterIP 10.63.240.251 443/TCP 23m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/cert-manager 1 1 1 1 23m -deployment.apps/cert-manager-cainjector 1 1 1 1 23m -deployment.apps/cert-manager-webhook 1 1 1 1 23m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cert-manager-86478c5ff 1 1 1 23m -replicaset.apps/cert-manager-cainjector-65dbccb8b6 1 1 1 23m -replicaset.apps/cert-manager-webhook-78f9d55fdf 1 1 1 23m -``` - -## Set up your application - -Any HTTP-based application can be used, for the purpose of the demo, install -the following echo server: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Set up DNS - -Get the IP address of the load balancer for Kong: - -```bash -$ kubectl get service -n kong kong-proxy -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 35.233.170.67 80:31929/TCP,443:31408/TCP 58d -``` - -To get only the IP address: - -```bash -$ kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy -35.233.170.67 -``` - -Please note that the IP address in your case will be different. - -Next, setup a DNS records to resolve `proxy.example.com` to the -above IP address: - -```bash -$ dig +short proxy.example.com -35.233.170.67 -``` - -Next, setup a CNAME DNS record to resolve `demo.example.com` to -`proxy.example.com`. - -```bash -$ dig +short demo.yolo2.com -proxy.example.com. 
-35.233.170.67 -``` - -## Expose your application to the Internet - -Setup an Ingress rule to expose the application: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: demo.example.com - http: - paths: - - path: / - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -Access your application: - -```bash -$ curl -I demo.example.com -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 21:14:45 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 -``` - -## Request TLS Certificate from Let's Encrypt - -First, setup a ClusterIssuer for cert-manager - -```bash -$ echo "apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-prod - namespace: cert-manager -spec: - acme: - email: user@example.com #please change this - privateKeySecretRef: - name: letsencrypt-prod - server: https://acme-v02.api.letsencrypt.org/directory - solvers: - - http01: - ingress: - class: kong" | kubectl apply -f - -clusterissuer.cert-manager.io/letsencrypt-prod configured -``` - -*Note*: If you run into issues configuring this, -be sure that the group (`cert-manager.io`) and -version (`v1`) match those in the output of -`kubectl describe crd clusterissuer`. -This directs cert-manager which CA authority to use to issue the certificate. - -Next, update your Ingress resource to provision a certificate and then use it: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/tls-acme: "true" - cert-manager.io/cluster-issuer: letsencrypt-prod - kubernetes.io/ingress.class: kong -spec: - tls: - - secretName: demo-example-com - hosts: - - demo.example.com - rules: - - host: demo.example.com - http: - paths: - - path: / - backend: - serviceName: echo - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo-example-com configured -``` - -Things to note here: - -- The annotation `kubernetes.io/tls-acme` is set to `true`, informing - cert-manager that it should provision a certificate for hosts in this - Ingress using ACME protocol. -- `certmanager.k8s.io/cluster-issuer` is set to `letsencrypt-prod`, directing - cert-manager to use Let's Encrypt's production server to provision a TLS - certificate. -- `tls` section of the Ingress directs the {{site.kic_product_name}} to use the - secret `demo-example-com` to encrypt the traffic for `demo.example.com`. - This secret will be created by cert-manager. - -Once you update the Ingress resource, cert-manager will start provisioning -the certificate and in sometime the certificate will be available for use. 
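Behind the scenes, cert-manager creates a Certificate resource named after the
`secretName` in the Ingress, and once issuance succeeds it writes the
`demo-example-com` Secret that Kong serves for this host. A quick way to check
that both exist (the names come from the Ingress above):

```bash
# The Certificate is created by cert-manager; the Secret appears once issued
$ kubectl get certificate demo-example-com
$ kubectl get secret demo-example-com
```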
- -You can track the progress of certificate issuance: - -```bash -$ kubectl describe certificate demo-example-com -Name: demo-example-com -Namespace: default -Labels: -Annotations: -API Version: certmanager.k8s.io/v1 -Kind: Certificate -Metadata: - Creation Timestamp: 2019-06-21T20:41:54Z - Generation: 1 - Owner References: - API Version: extensions/v1beta1 - Block Owner Deletion: true - Controller: true - Kind: Ingress - Name: demo-example-com - UID: 261d15d3-9464-11e9-9965-42010a8a01ad - Resource Version: 19561898 - Self Link: /apis/certmanager.k8s.io/v1/namespaces/default/certificates/demo-example-com - UID: 014d3f1d-9465-11e9-9965-42010a8a01ad -Spec: - Acme: - Config: - Domains: - demo.example.com - Http 01: - Dns Names: - demo.example.com - Issuer Ref: - Kind: ClusterIssuer - Name: letsencrypt-prod - Secret Name: demo-example-com -Status: - Conditions: - Last Transition Time: 2019-06-21T20:42:20Z - Message: Certificate is up to date and has not expired - Reason: Ready - Status: True - Type: Ready - Not After: 2019-09-19T19:42:19Z -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Generated 53m cert-manager Generated new private key - Normal GenerateSelfSigned 53m cert-manager Generated temporary self signed certificate - Normal OrderCreated 53m cert-manager Created Order resource "demo-example-com-3811625818" - Normal OrderComplete 53m cert-manager Order "demo-example-com-3811625818" completed successfully - Normal CertIssued 53m cert-manager Certificate issued successfully -``` - -## Test HTTPS - -Once all is in place, you can use HTTPS: - -```bash -$ curl -v https://demo.example.com -* Rebuilt URL to: https://demo.example.com/ -* Trying 35.233.170.67... -* TCP_NODELAY set -* Connected to demo.example.com (35.233.170.67) port 443 (#0) -* ALPN, offering h2 -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/cert.pem - CApath: none -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES256-GCM-SHA384 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=demo.example.com -* start date: Jun 21 19:42:19 2019 GMT -* expire date: Sep 19 19:42:19 2019 GMT -* subjectAltName: host "demo.example.com" matched cert's "demo.example.com" -* issuer: C=US; O=Let's Encrypt; CN=Let's Encrypt Authority X3 -* SSL certificate verify ok. 
-> GET / HTTP/1.1 -> Host: demo.example.com -> User-Agent: curl/7.54.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Content-Type: text/plain; charset=UTF-8 -< Transfer-Encoding: chunked -< Connection: keep-alive -< Date: Fri, 21 Jun 2019 21:37:43 GMT -< Server: echoserver -< X-Kong-Upstream-Latency: 1 -< X-Kong-Proxy-Latency: 1 -< Via: kong/1.1.2 -< - - -Hostname: echo-d778ffcd8-52ddj - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-9w4t - pod name: echo-d778ffcd8-52ddj - pod namespace: default - pod IP:10.60.2.246 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.2.239 - method=GET - real path=/ - query= - request_version=1.1 - request_scheme=http - request_uri=http://demo.example.com:8080/ - -Request Headers: - accept=*/* - connection=keep-alive - host=demo.example.com - user-agent=curl/7.54.0 - x-forwarded-for=10.138.0.6 - x-forwarded-host=demo.example.com - x-forwarded-port=8443 - x-forwarded-proto=https - x-real-ip=10.138.0.6 - -Request Body: - -no body in request- -``` - -Et voilà ! You've secured your API with HTTPS -with the {{site.kic_product_name}} and cert-manager. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/configure-acl-plugin.md b/app/kubernetes-ingress-controller/1.3.x/guides/configure-acl-plugin.md deleted file mode 100644 index 2ef8aecd4249..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/configure-acl-plugin.md +++ /dev/null @@ -1,755 +0,0 @@ ---- -title: Configuring ACL Plugin ---- - -This guide walks through configuring the Kong ACL Plugin. The ACL Plugin -requires the use of at least one Authentication plugin. This example will use -the JWT Auth Plugin - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. 
- -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create two Ingress rules to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Test the Ingress rules: - -```bash -$ curl -i $PROXY_IP/get -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -``` - -## Add JWT authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. Let's enable JWT authentication - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: app-jwt -plugin: jwt -" | kubectl apply -f - -``` - -Now let's associate the plugin to the Ingress rules we created earlier. - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Any requests matching the proxying rules for `demo-get` and `demo` post will -now require a valid JWT and the consumer for the JWT to be associate with the -right ACL. 
- -```bash -$ curl -i $PROXY_IP/get - -HTTP/1.1 401 Unauthorized -Date: Mon, 06 Apr 2020 07:27:44 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 50 -X-Kong-Response-Latency: 2 -Server: kong/2.0.2 - - -{"message":"Unauthorized"} - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post - -HTTP/1.1 401 Unauthorized -Date: Mon, 06 Apr 2020 07:27:44 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 50 -X-Kong-Response-Latency: 2 -Server: kong/2.0.2 - - -{"message":"Unauthorized"} -``` - -You should get a 401 response telling you that the request is not authorized. - -## Provision Consumers - -Let's provision 2 KongConsumer resources: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -" | kubectl apply -f - -``` - -## Secrets - -Next, let's provision some Secrets for the KongConsumers to reference. Each -ACL will need its own Secret and each JWT public key will need its own Secret. -The credential type is specified in the `kongCredType` field. In this -case we'll be using `jwt` and `acl`. You can create a secret using any other -method as well. - -The JWT signing algorithm is set in the `algorithm` field. The if using a -public key like this example it is stored in the `rsa_pulic_key` field. If you -are using a secret signing key, use the `secret` field. The last field to set -if you are using `RS256` or `ES256` is the `key` field. This should match the -`iss` field in the JWT you will be sending. You can check this value by -decoding your JWT over at [https://jwt.io](https://jwt.io) - -Since we are using the Secret resource, Kubernetes will encrypt and store the -JWT signing key and ACL group for us. - -### JWT signing key - -```bash -# create secret for jwt public key -$ kubectl create secret \ - generic app-admin-jwt \ - --from-literal=kongCredType=jwt \ - --from-literal=key="admin-issuer" \ - --from-literal=algorithm=RS256 \ - --from-literal=secret="dummy" \ - --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY----- - MIIBIjA.... - -----END PUBLIC KEY-----" - -# create a second secret with a different key -$ kubectl create secret \ - generic app-user-jwt \ - --from-literal=kongCredType=jwt \ - --from-literal=key="user-issuer" \ - --from-literal=algorithm=RS256 \ - --from-literal=secret="dummy" \ - --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY----- - qwerlkjqer.... - -----END PUBLIC KEY-----" -``` - -Validation requirements impose that even if the `secret` is not used for algorithm -`RS256` or `ES256` the field `secret` must be present, so put some dummy value for it. - -## Assign the credentials - -In order to for the ACL and JWT to be validated by Kong, the secrets will need -to be referenced by the KongConsumers we created earlier. Let's update those. 
- -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt -" | kubectl apply -f - -``` - -## Use the credential - -Now to use a JWT to pass authentication. Let's store the user and admin jwt's -in some environment variables. `USER_JWT` and `ADMIN_JWT`. If you are using -an identity provider, you should be able to login and get out a JWT from their -API. If you are generating your own, go through the process of generating your -own. - -Let's test the get route - -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK 
-Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - - -``` - -## Adding ACL's - -The JWT plugin doesn't provide the ability to authroize a given issuer to a -given ingress. To do this we need to use the ACL plugin. Let's create an admin -ACL config - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: admin-acl -plugin: acl -config: - whitelist: ['app-admin'] -" | kubectl apply -f - -``` - -Then let's create a user ACL config. We want our admin to be able to access -the same resources as the user, so let's make sure we include them in the -whitelist. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: plain-user-acl -plugin: acl -config: - whitelist: ['app-user','app-admin'] -" | kubectl apply -f - -``` - -Next let's create the secrets that will define the ACL groups. - -```bash -# create secrets for acl groups -$ kubectl create secret \ - generic app-admin-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-admin - -$ kubectl create secret \ - generic app-user-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-user -``` - -After we create the secrets, the consumers need to be updated to reference the -ACL credentials - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt - - app-admin-acl -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt - - app-user-acl -" | kubectl apply -f - -``` - -The last thing to configure is the ingress to use the new plguins. Note, if you -set more than one ACL plugin, the last one supplied will be the only one -evaluated. - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt,plain-user-acl - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /get - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt,admin-acl - konghq.com/strip-path: "false" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /post - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Now let's test it. 
- -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-user", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post -HTTP/1.1 403 Forbidden -Date: Mon, 06 Apr 2020 07:11:59 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 45 -X-Kong-Response-Latency: 1 -Server: kong/2.0.2 - -{"message":"You cannot consume this service"} -``` - -The `plain-user` user is not in the `admin-acl` whitelist, and is therefore -unauthorized to access the resource - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 1156 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 07:20:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 4 -X-Kong-Proxy-Latency: 4 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} -``` diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/configuring-custom-entities.md b/app/kubernetes-ingress-controller/1.3.x/guides/configuring-custom-entities.md deleted file mode 100644 index 747286a87864..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/configuring-custom-entities.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Configuring Custom Entities 
----
-
-This is an **advanced-level** guide for users who use custom entities in Kong.
-Most users do not need to use this feature.
-
-Kong has in-built extensibility with its plugin architecture.
-Plugins in Kong have a `config` property where users can store configuration
-for any custom plugin and this suffices in most use cases.
-In some use cases, plugins define custom entities to store additional
-configuration outside the plugin instance itself.
-This guide elaborates on how such custom entities can be used with the Kong
-Ingress Controller.
-
-> Note: All entities shipped with Kong are supported by the
-{{site.kic_product_name}} out of the box. This guide applies only if you have a
-custom entity in your plugin. To check if your plugin contains a custom entity,
-the source code will usually contain a `daos.lua` file.
-Custom plugins have first-class support in the {{site.kic_product_name}}
-via the `KongPlugin` CRD.
-Please read [the custom plugin guide](/kubernetes-ingress-controller/{{page.release}}/guides/setting-up-custom-plugins/) instead
-if you are only using custom plugins.
-
-## Caveats
-
-- The feature discussed in this guide applies to DB-less deployments of Kong.
-  The feature is not supported for deployments where Kong is used with a
-  database or Kong is used in hybrid mode.
-  For these deployments, configure custom entities directly using Kong's Admin
-  API.
-- Custom entities which have a foreign relation with other core entities in Kong
-  are not supported. Only entities which can exist by themselves and then
-  be referenced via plugin configuration are supported.
-
-## Creating a JSON representation of the custom entity
-
-In this section, we will learn how to create a JSON representation of
-a custom entity.
-
-Suppose you have a custom entity with the following schema in your plugin source:
-
-```lua
-{
-  name = "xkcds",
-  primary_key = { "id" },
-  cache_key = { "name" },
-  endpoint_key = "name",
-  fields = {
-    { id = typedefs.uuid },
-    {
-      name = {
-        type = "string",
-        required = true,
-        unique = true,
-      },
-    },
-    {
-      url = {
-        type = "string",
-        required = true,
-      },
-    },
-    { created_at = typedefs.auto_timestamp_s },
-    { updated_at = typedefs.auto_timestamp_s },
-  },
-}
-```
-
-An instance of such an entity would look like:
-
-```json
-{
-  "id": "385def6e-3059-4929-bb12-d205e97284c5",
-  "name": "Bobby Drop Tables",
-  "url": "https://xkcd.com/327/"
-}
-```
-
-Multiple instances of such an entity are represented as follows:
-
-```json
-{
-  "xkcds": [
-    {
-      "id": "385def6e-3059-4929-bb12-d205e97284c5",
-      "name": "bobby_tables",
-      "url": "https://xkcd.com/327/"
-    },
-    {
-      "id": "d079a632-ac8d-4a9a-860c-71de82e8fc11",
-      "name": "compiling",
-      "url": "https://xkcd.com/303/"
-    }
-  ]
-}
-```
-
-If you have more than one custom entity to configure, add each entity type as
-a key at the root level of the JSON, with a JSON array containing that
-entity's instances as the value of the key.
-
-To configure custom entities in a DB-less instance of Kong,
-you first need to create such a JSON representation of your entities.
-
-## Configuring the custom entity secret
-
-Once you have the JSON representation, you need to store it
-inside a Kubernetes Secret.
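-
-For example, assuming you want to load the two `xkcds` instances shown above,
-you could write that JSON representation to a local file:
-
-```bash
-# Save the JSON representation of the custom entities to a file.
-# The filename is up to you; it is only referenced again in the next step.
-$ cat <<'EOF' > entities.json
-{
-  "xkcds": [
-    {
-      "id": "385def6e-3059-4929-bb12-d205e97284c5",
-      "name": "bobby_tables",
-      "url": "https://xkcd.com/327/"
-    },
-    {
-      "id": "d079a632-ac8d-4a9a-860c-71de82e8fc11",
-      "name": "compiling",
-      "url": "https://xkcd.com/303/"
-    }
-  ]
-}
-EOF
-```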
-
-The following command assumes the filename to be `entities.json`, but you can
-use any other filename as well:
-
-```bash
-$ kubectl create secret generic -n kong kong-custom-entities --from-file=config=entities.json
-secret/kong-custom-entities created
-```
-
-Some things to note:
-- The key inside the secret must be `config`. This is not configurable at the
-  moment.
-- The secret must be accessible by the Ingress Controller. The recommended
-  practice here is to install the secret in the same namespace in which Kong
-  is running.
-
-## Configure the Ingress Controller
-
-Once you have the secret containing the custom entities configured,
-you need to instruct the controller to read the secret and sync the custom
-entities to Kong.
-
-To do this, you need to add the following environment variable to the
-`ingress-controller` container:
-
-```yaml
-env:
-- name: CONTROLLER_KONG_CUSTOM_ENTITIES_SECRET
-  value: kong/kong-custom-entities
-```
-
-The value of this environment variable takes the form `namespace/secret-name`.
-You need to configure this only once.
-
-This instructs the controller to watch the above secret and configure Kong
-with any custom entities present inside the secret.
-If you change the configuration and update the secret with different entities,
-the controller will dynamically fetch the updated secret and configure Kong.
-
-## Verification
-
-You can verify that the custom entity was actually created in Kong's memory
-using `GET /xkcds` (the endpoint will differ based on the name of the entity)
-on Kong's Admin API.
-You can forward traffic from your local machine to the Kong Pod to access it:
-
-```bash
-$ kubectl port-forward -n kong KONG-POD-NAME 8444:8444
-```
-
-and in a separate terminal:
-
-```bash
-$ curl -k https://localhost:8444/xkcds
-```
-
-## Using the custom entity
-
-You can now reference the custom entity in any of your custom plugin's
-`config` object:
-
-```yaml
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: random-xkcd-header
-config:
-  xkcds:
-  - d079a632-ac8d-4a9a-860c-71de82e8fc11
-plugin: xkcd-header
-```
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/configuring-fallback-service.md b/app/kubernetes-ingress-controller/1.3.x/guides/configuring-fallback-service.md
deleted file mode 100644
index 46d238e4b6ad..000000000000
--- a/app/kubernetes-ingress-controller/1.3.x/guides/configuring-fallback-service.md
+++ /dev/null
@@ -1,182 +0,0 @@
----
-title: Configuring a fallback service
----
-
-This guide walks through how to set up a fallback service using an Ingress
-resource. The fallback service will receive all requests that don't
-match against any of the defined Ingress rules.
-This can be useful for scenarios where you would like to return a 404 page
-to the end user if the user clicks on a dead link or inputs an incorrect URL.
-
-## Installation
-
-Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install
-the {{site.kic_product_name}} on your Kubernetes cluster.
-
-## Testing Connectivity to Kong
-
-This guide assumes that the `PROXY_IP` environment variable is
-set to contain the IP address or URL pointing to Kong.
-Please follow one of the
-[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable.
-
-If everything is set up correctly, making a request to Kong should return
-HTTP 404 Not Found.
- -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup a simple HTTP service in the -cluster and proxy it. - -```bash -$ echo ' -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fallback-svc -spec: - replicas: 1 - selector: - matchLabels: - app: fallback-svc - template: - metadata: - labels: - app: fallback-svc - spec: - containers: - - name: fallback-svc - image: hashicorp/http-echo - args: - - "-text" - - "This is not the path you are looking for. - Fallback service" - ports: - - containerPort: 5678 ---- -apiVersion: v1 -kind: Service -metadata: - name: fallback-svc - labels: - app: fallback-svc -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: 5678 - protocol: TCP - name: http - selector: - app: fallback-svc -' | kubectl apply -f - -``` - -Result: - -```bash -deployment.apps/fallback-svc created -service/fallback-svc created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup a fallback service - -Let's deploy another sample service service: - -```bash -$ kubectl apply -f https://bit.ly/fallback-svc -deployment.extensions/fallback-svc created -service/fallback-svc created -``` - -Next, let's set up an Ingress rule to make it the fallback service -to send all requests to it that don't match any of our Ingress rules: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: fallback - annotations: - kubernetes.io/ingress.class: kong -spec: - backend: - serviceName: fallback-svc - servicePort: 80 -" | kubectl apply -f - -``` - -## Test it - -Now send a request with a request property that doesn't match against -any of the defined rules: - -```bash -$ curl $PROXY_IP/random-path -This is not the path you are looking for. - Fallback service -``` - -The above message comes from the fallback service that was deployed in the -last step. - -Create more Ingress rules, some complicated regex based ones and -see how requests that don't match any rules, are forwarded to the -fallback service. - -You can also use Kong's request-termination plugin on the `fallback` -Ingress resource to terminate all requests at Kong, without -forwarding them inside your infrastructure. 
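-
-As a concrete sketch of that last suggestion: the plugin name
-`fallback-termination` and the response configured below are illustrative
-choices, not part of the original setup.
-
-```bash
-# Create a request-termination plugin that always answers with a 404
-$ echo '
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: fallback-termination
-plugin: request-termination
-config:
-  status_code: 404
-  message: "Not Found"
-' | kubectl apply -f -
-
-# Attach the plugin to the fallback Ingress created above
-$ kubectl annotate ingress fallback konghq.com/plugins=fallback-termination
-```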
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/configuring-health-checks.md b/app/kubernetes-ingress-controller/1.3.x/guides/configuring-health-checks.md deleted file mode 100644 index 875525104609..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/configuring-health-checks.md +++ /dev/null @@ -1,353 +0,0 @@ ---- -title: Setting up Active and Passive health checks ---- - -In this guide, we will go through steps necessary to setup active and passive -health checking using the {{site.kic_product_name}}. This configuration allows -Kong to automatically short-circuit requests to specific Pods that are -mis-behaving in your Kubernetes Cluster. - -> Please make sure to use {{site.kic_product_name}} >= 0.6 as the previous -versions contain a [bug](https://github.com/hbagdi/go-kong/issues/6). - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy any requests yet. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Observe the headers and you can see that Kong has proxied the request correctly. - -## Setup passive health checking - -Now, let's setup passive HTTP health-check for our service. -All health-checking is done at Service-level and not Ingress-level. 
- -Add the following KongIngress resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking created -``` - -Here, we are configuring Kong to short-circuit requests to a pod -if a pod throws 3 consecutive errors. - -Next, associate the KongIngress resource with `httpbin` service: - -```bash -$ kubectl patch svc httpbin -p '{"metadata":{"annotations":{"konghq.com/override":"demo-health-checking"}}}' -service/httpbin patched -``` - -Now, let's send some traffic to test if this works: - -Let's send 2 requests that represent a failure from upstream -and then send a request for 200. -Here we are using `/status/500` to simulate a failure from upstream. - -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Kong has not short-circuited because there were only two failures. -Let's send 3 requests and open the circuit, and then send a normal request. 
- -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 22:41:19 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} - -``` - -As we can see, Kong returns back a 503, representing that the service is -unavailable. Since we have only one pod of httpbin running in our cluster, -and that is throwing errors, Kong will not proxy anymore requests. - -Now we have a few options: - -- Delete the current httpbin pod; Kong will then proxy requests to the new - pod that comes in its place. -- Scale the httpbin deployment; Kong will then proxy requests to the new - pods and leave the short-circuited pod out of the loop. -- Manually change the pod health status in Kong using Kong's Admin API. - -These options highlight the fact that once a circuit is opened because of -errors, there is no way for Kong to close the circuit again. - -This is a feature which some services might need, where once a pod starts -throwing errors, manual intervention is necessary before that pod can -again handle requests. -To get around this, we can introduce active health-check, where each instance -of Kong actively probes pods to figure out if they are healthy or not. - -## Setup active health checking - -Let's update our KongIngress resource to use active health-checks: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - active: - healthy: - interval: 5 - successes: 3 - http_path: /status/200 - type: http - unhealthy: - http_failures: 1 - interval: 5 - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking configured -``` - -Here, we are configuring Kong to actively probe `/status/200` every 5 seconds. -If a pod is unhealthy (from Kong's perspective), -3 successful probes will change the status of the pod to healthy and Kong -will again start to forward requests to that pod. 
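-
-If you want to observe these state changes from Kong's side, you can
-optionally query Kong's Admin API, which reports per-target health for each
-upstream. This is only a sketch: it assumes the Admin API listens on port
-8444 inside the Kong pod, as in the manifests used by the deployment guides,
-and the pod and upstream names will differ in your cluster.
-
-```bash
-# Forward the Admin API port from the Kong pod
-$ kubectl port-forward -n kong KONG-POD-NAME 8444:8444
-
-# In a separate terminal: list upstreams, then inspect target health
-$ curl -sk https://localhost:8444/upstreams
-$ curl -sk https://localhost:8444/upstreams/UPSTREAM-NAME/health
-```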
- -Now, the requests should flow once again: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Let's trip the circuit again by sending three requests that will return -500s from httpbin: - -```bash -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -``` - -Now, sending the following request will fail for about 15 seconds, -the duration it will take active healthchecks to re-classify -the httpbin pod as healthy again. - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 23:17:47 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} -``` - -After 15 seconds, you will see: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As we can see, active health-checks automatically marked a pod as healthy -when passive health-checks marked it unhealthy. - -## Bonus - -Scale the `httpbin` and `ingress-kong` deployments and observe how -multiple pods change the outcome of the above demo. - -Read more about health-checks and ciruit breaker in Kong's -[documentation](/gateway/latest/reference/health-checks-circuit-breakers). diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/configuring-https-redirect.md b/app/kubernetes-ingress-controller/1.3.x/guides/configuring-https-redirect.md deleted file mode 100644 index b54fb8e41a2f..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/configuring-https-redirect.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Configuring https redirect ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -redirect HTTP request to HTTPS so that all communication -from the external world to your APIs and microservices is encrypted. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup HTTPS redirect - -To instruct Kong to redirect all HTTP requests matching this Ingress rule to -HTTPS, update its annotations to limit its protocols to HTTPS only and -issue a 301 redirect: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"https","konghq.com/https-redirect-status-code":"301"}}}' -ingress.extensions/demo patched -``` - -## Test it - -Now, making a plain-text HTTP request to Kong will result in a redirect -being issued from Kong: - -```bash -$ curl $PROXY_IP/foo/headers -I -HTTP/1.1 301 Moved Permanently -Date: Tue, 06 Aug 2019 18:04:38 GMT -Content-Type: text/html -Content-Length: 167 -Connection: keep-alive -Location: https://35.197.125.63/foo/headers -Server: kong/1.2.1 -``` - -The `Location` header will contain the URL you need to use for an HTTPS -request. Please note that this URL will be different depending on your -installation method. You can also grab the IP address of the load balancer -fronting Kong and send a HTTPS request to test it. - -Let's test it: - -```bash -$ curl -k https://35.197.125.63/foo/headers -{ - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Host": "35.197.125.63", - "User-Agent": "curl/7.54.0", - "X-Forwarded-Host": "35.197.125.63" - } -} -``` - -We can see that Kong correctly serves the request only on HTTPS protocol -and redirects the user if plaint-text HTTP protocol is used. -We had to use `-k` flag in cURL to skip certificate validation as the -certificate served by Kong is a self-signed one. -If you are serving this traffic via a domain that you control and have -configured TLS properties for it, then the flag won't -be necessary. - -If you have a domain that you control but don't have TLS/SSL certificates -for it, please check out out -[Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager) guide which can get TLS -certificates setup for you automatically. And it's free, thanks to -Let's Encrypt! 
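-
-If you prefer to keep these settings declarative instead of patching a live
-object, the same two annotations can be included directly in the Ingress
-manifest. A minimal sketch, reusing the `demo` Ingress from this guide:
-
-```bash
-$ echo '
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
-  name: demo
-  annotations:
-    konghq.com/strip-path: "true"
-    konghq.com/protocols: "https"
-    konghq.com/https-redirect-status-code: "301"
-    kubernetes.io/ingress.class: kong
-spec:
-  rules:
-  - http:
-      paths:
-      - path: /foo
-        backend:
-          serviceName: httpbin
-          servicePort: 80
-' | kubectl apply -f -
-```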
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/getting-started-istio.md b/app/kubernetes-ingress-controller/1.3.x/guides/getting-started-istio.md deleted file mode 100644 index 4f71ca8ba573..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/getting-started-istio.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: Running the Kong Ingress Controller with Istio ---- - -In this guide, you will: -* Install Istio v1.6.7 and Kong in your cluster. -* Deploy an example Istio-enabled application (_bookinfo_). -* Deploy an `Ingress` customized with a `KongPlugin` for the example application. -* Make several requests to the sample application via Kong and Istio. -* See the performance metrics of the sample application, provided by Istio. - -### Prerequisites -For this guide, you will need: - -* A Kubernetes v1.15 (or newer) cluster which can pull container images from public registries. For example, you can use: - * A managed Kubernetes cluster (AWS EKS, Google Cloud GKE, Azure AKS). - * Minikube. - * `microk8s` with the `dns` addon enabled. -* `kubectl` with admin access to the cluster. - -### Download Istio - -Download the Istio bundle at version 1.6.7: - -```console -$ curl -L https://istio.io/downloadIstio | env ISTIO_VERSION=1.6.7 sh - -... -... -Istio 1.6.7 Download Complete! - -Istio has been successfully downloaded into the istio-1.6.7 folder on your system. -... -... -``` - -### Install Istio Operator - -Invoke `istioctl` to deploy the Istio Operator to the Kubernetes cluster: - -```console -$ ./istio-1.6.7/bin/istioctl operator init -Using operator Deployment image: docker.io/istio/operator:1.6.7 -✔ Istio operator installed -✔ Installation complete -``` - -### Deploy Istio using Operator - -Deploy Istio using Istio Operator: - -```console -$ kubectl create namespace istio-system -namespace/istio-system created -``` -```console -$ kubectl apply -f - < 8000 -Forwarding from [::1]:8080 -> 8000 -... -``` - -Navigate your web browser to `http://localhost:8080/` You should be able to see a bookstore web application. Click -through any available links several times. As you hit 30 requests per minute (for example, by holding down the "Refresh" -key combination, e.g. `` or ``), you should obtain a `Kong Error - API rate limit exceeded` response. - -### See the connection graph in Kiali - -Connect to Kiali (the Istio dashboard): - -```console -$ # Keep the command below running in the background -$ kubectl port-forward service/kiali 20001:20001 -n istio-system -Forwarding from 127.0.0.1:20001 -> 20001 -Forwarding from [::1]:20001 -> 20001 -... -``` - -* Navigate your web browser to `http://localhost:20001/`. -* Log in using the default credentials (`admin`/`admin`). -* Choose _Workloads_ from the menu on the left. -* Select `my-istio-app` in the _Namespace_ drop-down menu. -* Click the _productpage-v1_ service name. -* Click the three dots button in the top-right corner of _Graph Overview_ and click _Show full graph_. -* Select `kong-istio` alongside `my-istio-app` in the _Namespace_ diagram. -* Observe a connection graph spanning from `example-kong-kong-proxy` through `productpage-v1` to the other sample -application services such as `ratings-v1` and `details-v1`. 
- -### See the metrics in Grafana - -Connect to Grafana (a dashboard frontend for Prometheus which has been deployed with Istio): - -```console -$ # Keep the command below running in the background -$ kubectl port-forward service/grafana 3000:3000 -n istio-system -Forwarding from 127.0.0.1:3000 -> 3000 -Forwarding from [::1]:3000 -> 3000 -... -``` - -* Navigate your web browser to `http://localhost:3000/`. -* Expand the dashboard selection drop-down menu from the top of the screen. Expand the `istio` directory and choose the -_Istio Workload Dashboard_ from the list. -* Choose _Namespace: my-istio-app_ and _Workload: productpage-v1_ from the drop-downs. -* Choose a timespan in the top-right of the page to include the time when you made requests to the sample application (e.g. _Last 1 hour_). -* Observe the incoming and outgoing request graphs reflecting actual requests from Kong to `productpage-v1`, and from `productpage-v1` to its backends. - -Note that the requests from the web browser to Kong are not reflected in inbound stats of `example-kong-kong-proxy` -because we've issued these requests by `kubectl port-forward`, thus bypassing the Istio proxy sidecar in Kong. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/getting-started.md b/app/kubernetes-ingress-controller/1.3.x/guides/getting-started.md deleted file mode 100644 index 2de9ca9d32fc..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/getting-started.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Getting started with the Kong Ingress Controller ---- - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return back -a HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.1.2 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy the request yet. - -## Set up an echo-server - -Setup an echo-server application to demonstrate how -to use the {{site.kic_product_name}}: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -This application just returns information about the -pod and details from the HTTP request. 
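-
-Before creating any Ingress rules, you can optionally confirm that the echo
-Deployment has finished rolling out. This is just a sanity check, not a
-required step:
-
-```bash
-# Wait until the echo Deployment reports a successful rollout
-$ kubectl rollout status deployment echo
-
-# Confirm the echo Service exists and exposes port 80
-$ kubectl get service echo
-```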
- -## Basic proxy - -Create an Ingress rule to proxy the echo-server created previously: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Fri, 21 Jun 2019 17:12:49 GMT -Server: echoserver -X-Kong-Upstream-Latency: 0 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 - - - -Hostname: echo-758859bbfb-txt52 - -Pod Information: - node name: minikube - pod name: echo-758859bbfb-txt52 - pod namespace: default - pod IP: 172.17.0.14 -<-- clipped --> -``` - -If everything is deployed correctly, you should see the above response. -This verifies that Kong can correctly route traffic to an application running -inside Kubernetes. - -## Using plugins in Kong - -Setup a KongPlugin resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -kongplugin.configuration.konghq.com/request-id created -``` - -Create a new Ingress resource which uses this plugin: - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - konghq.com/plugins: request-id - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -The above resource directs Kong to execute the request-id plugin whenever -a request is proxied matching any rule defined in the resource. - -Send a request to Kong: - -```bash -$ curl -i -H "Host: example.com" $PROXY_IP/bar/sample -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:09:02 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 - - - -Hostname: echo-758859bbfb-cnfmx - -Pod Information: - node name: minikube - pod name: echo-758859bbfb-cnfmx - pod namespace: default - pod IP: 172.17.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=172.17.0.2 - method=GET - real path=/bar/sample - query= - request_version=1.1 - request_scheme=http - request_uri=http://example.com:8080/bar/sample - -Request Headers: - accept=*/* - connection=keep-alive - host=example.com - my-request-id=7250803a-a85a-48da-94be-1aa342ca276f#6 - user-agent=curl/7.54.0 - x-forwarded-for=172.17.0.1 - x-forwarded-host=example.com - x-forwarded-port=8000 - x-forwarded-proto=http - x-real-ip=172.17.0.1 - -Request Body: - -no body in request- -``` - -The `my-request-id` can be seen in the request received by echo-server. -It is injected by Kong as the request matches one -of the Ingress rules defined in `demo-example-com` resource. - -## Using plugins on Services - -Kong Ingress allows plugins to be executed on a service level, meaning -Kong will execute a plugin whenever a request is sent to a specific k8s service, -no matter which Ingress path it came from. 
- -Create a KongPlugin resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: rl-by-ip -config: - minute: 5 - limit_by: ip - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongplugin.configuration.konghq.com/rl-by-ip created -``` - -Next, apply the `konghq.com/plugins` annotation on the Kubernetes Service -that needs rate-limiting: - -```bash -kubectl patch svc echo \ - -p '{"metadata":{"annotations":{"konghq.com/plugins": "rl-by-ip\n"}}}' -``` - -Now, any request sent to this service will be protected by a rate-limit -enforced by Kong: - -```bash -$ curl -I $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:25:49 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 2 -X-Kong-Upstream-Latency: 0 -X-Kong-Proxy-Latency: 4 -Via: kong/1.1.2 - -$ curl -I -H "Host: example.com" $PROXY_IP/bar/sample -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 18:28:30 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 2 -Via: kong/1.1.2 -``` - -## Result - -This guide sets up the following configuration: - -```text -HTTP requests with /foo -> Kong enforces rate-limit -> echo server - -HTTP requests with /bar -> Kong enforces rate-limit + -> echo-server - on example.com injects my-request-id header -``` diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/overview.md b/app/kubernetes-ingress-controller/1.3.x/guides/overview.md deleted file mode 100644 index ca89c71096df..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/overview.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Guides ---- - -Follow one of the guides to learn more about how to use -the {{site.kic_product_name}}: - -- [Getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started/) with the {{site.kic_product_name}} -- [Getting started using Istio](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started-istio/) with the {{site.kic_product_name}} and Istio -- [Using KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) - This guide walks through setting up plugins in Kong using a declarative - approach. -- [Using KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource/) - This guide explains how the KongIngress resource can be used to change Kong - specific settings like load-balancing, health-checking and proxy behaviour. -- [Using KongConsumer and Credential Resources](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource/) - This guide walks through how Kubernetes native declarative configuration - can be used to dynamically provision credentials for authentication purposes - in the Ingress layer. -- [Using JWT and ACL KongPlugin resources](/kubernetes-ingress-controller/{{page.release}}/guides/configure-acl-plugin/) - This guides walks you through configuring the JWT plugin and ACL plugin for - authentication purposes at the Ingress layer -- [Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager/) - This guide walks through how to use cert-manager along with Kong Ingress - Controller to automate TLS certificate provisioning and using them - to encrypt your API traffic. 
-- [Configuring a fallback service](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-fallback-service/) - This guide walks through how to setup a fallback service using Ingress - resource. The fallback service will receive all requests that don't - match against any of the defined Ingress rules. -- [Using external service](/kubernetes-ingress-controller/{{page.release}}/guides/using-external-service/) - This guide shows how to expose services running outside Kubernetes via Kong, - using [External Name](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) - Services in Kubernetes. -- [Configuring HTTPS redirects for your services](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-https-redirect/) - This guide walks through how to configure the {{site.kic_product_name}} to - redirect HTTP request to HTTPS so that all communication - from the external world to your APIs and microservices is encrypted. -- [Using Redis for rate-limiting](/kubernetes-ingress-controller/{{page.release}}/guides/redis-rate-limiting/) - This guide walks through how to use Redis for storing rate-limit information - in a multi-node Kong deployment. -- [Integrate the {{site.kic_product_name}} with Prometheus/Grafana](/kubernetes-ingress-controller/{{page.release}}/guides/prometheus-grafana/) - This guide walks through the steps of how to deploy the {{site.kic_product_name}} - and Prometheus to obtain metrics for the traffic flowing into your - Kubernetes cluster. -- [Configuring circuit-breaker and health-checking](/kubernetes-ingress-controller/{{page.release}}/guides/configuring-health-checks/) - This guide walks through the usage of Circuit-breaking and health-checking - features of the {{site.kic_product_name}}. -- [Setting up custom plugin](/kubernetes-ingress-controller/{{page.release}}/guides/setting-up-custom-plugins/) - This guide walks through - installation of a custom plugin into Kong using - ConfigMaps and Volumes. -- [Using ingress with gRPC](/kubernetes-ingress-controller/{{page.release}}/guides/using-ingress-with-grpc/) - This guide walks through how to use the {{site.kic_product_name}} with gRPC. -- [Setting up upstream mTLS](/kubernetes-ingress-controller/{{page.release}}/guides/upstream-mtls/) - This guide gives an overview of how to setup mutual TLS authentication - between Kong and your upstream server. -- [Preserving Client IP address](/kubernetes-ingress-controller/{{page.release}}/guides/preserve-client-ip/) - This guide gives an overview of different methods to preserve the Client - IP address. -- [Using KongClusterPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) - This guide walks through setting up plugins that can be shared across - Kubernetes namespaces. -- [Using Kong with Knative](/kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/) - This guide gives an overview of how to setup Kong as the Ingress point - for Knative workloads. -- [Exposing TCP-based service](/kubernetes-ingress-controller/{{page.release}}/guides/using-tcpingress/) - This guide gives an overview of how to use TCPIngress resource to expose - non-HTTP based services outside a Kubernetes cluster. -- [Using mtls-auth plugin](/kubernetes-ingress-controller/{{page.release}}/guides/using-mtls-auth-plugin/) - This guide gives an overview of how to use `mtls-auth` plugin and CA - certificates to authenticate requests using client certificates. 
-- [Using OpenID-connect plugin](/kubernetes-ingress-controller/{{page.release}}/guides/using-oidc-plugin/)
-  This guide walks through the steps necessary to set up OIDC authentication.
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/preserve-client-ip.md b/app/kubernetes-ingress-controller/1.3.x/guides/preserve-client-ip.md
deleted file mode 100644
index 0a76e545bbf5..000000000000
--- a/app/kubernetes-ingress-controller/1.3.x/guides/preserve-client-ip.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: Preserving Client IP Address
----
-
-Kong is usually deployed behind a Load Balancer (using a
-Kubernetes Service of type `LoadBalancer`). This can result
-in the loss of the actual client IP address, with Kong observing the IP
-address of the Load Balancer as the client IP address instead. This guide lays
-out different methods of solving this problem.
-
-Preserving the client IP address in the cloud, behind Load Balancers, requires
-configuration that is specific to your use case, cloud provider,
-and other architecture details.
-[This document](https://kubernetes.io/docs/tutorials/services/source-ip/)
-provides details on how networking works inside Kubernetes and explains
-in detail how the various methods described later in this document work.
-It is recommended that you give it a read.
-
-The following methods can be used to preserve the client IP address:
-
-## ExternalTrafficPolicy: Local
-
-As explained in the
-[Kubernetes docs](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip),
-setting `service.spec.externalTrafficPolicy` to `Local` preserves the client
-IP address. You don't need to change any configuration in Kong if you
-are using this method to preserve the client IP address.
-
-Please note that this is not supported by all public cloud providers.
-
-## Proxy Protocol
-
-If you have an L4 Load Balancer that supports Proxy Protocol, and you're
-terminating TCP connections at the Load Balancer before passing traffic
-onward to Kong, then you can configure Kong to pick up the client IP
-address via this protocol.
-
-Once you have configured the Load Balancer to use Proxy Protocol, you
-need to set the following environment variables in Kong for Kong to
-receive the client IP from the Proxy Protocol header:
-
-- [`KONG_TRUSTED_IPS`](/gateway/latest/reference/configuration/#trusted_ips)
-- [`KONG_PROXY_LISTEN`](/gateway/latest/reference/configuration/#proxy_listen)
-- [`KONG_REAL_IP_HEADER`](/gateway/latest/reference/configuration/#real_ip_header)
-
-For example:
-
-```
-KONG_TRUSTED_IPS=0.0.0.0/0,::/0 # This trusts all IPs
-KONG_PROXY_LISTEN="0.0.0.0:8000 proxy_protocol, 0.0.0.0:8443 ssl proxy_protocol"
-KONG_REAL_IP_HEADER=proxy_protocol
-```
-
-## HTTP headers
-
-If you are using an L7 Load Balancer, that is, HTTP requests are terminated
-at the Load Balancer, then you need to use the `x-forwarded-for` or `x-real-ip`
-header to preserve details of the connection between the client and the Load Balancer.
-
-You should configure the Load Balancer to inject these headers, and then
-you need to set the following environment variables in Kong for Kong to pick up
-the client IP address from the HTTP headers:
-
-- [`KONG_TRUSTED_IPS`](/gateway/latest/reference/configuration/#trusted_ips)
-- [`KONG_REAL_IP_HEADER`](/gateway/latest/reference/configuration/#real_ip_header)
-- Optional: [`KONG_REAL_IP_RECURSIVE`](/gateway/latest/reference/configuration/#real_ip_recursive)
-
-Please note that if you are using an L7 Load Balancer with Kong,
-you cannot use the `certificates` feature in Kong, as the TLS session is
-already established between the client and the Load Balancer.
-
-## Cloud-provider specific details
-
-For the major public clouds, the following are some additional
-details that can help you preserve the client IP address:
-
-### GKE
-
-You can use `ExternalTrafficPolicy: Local` to preserve the client IP address.
-
-### AKS
-
-You can use `ExternalTrafficPolicy: Local` to preserve the client IP address.
-
-### EKS
-
-You have two options:
-
-- L4 Load Balancer
-  In this case, you need to use the Proxy Protocol method to preserve the
-  client IP address.
-- L7 Load Balancer
-  In this case, you need to use the HTTP headers method to preserve the
-  client IP address.
-
-The recommended Load Balancer type for AWS is NLB.
-You can choose the type of Load Balancer using the following annotation:
-
-```
-service.beta.kubernetes.io/aws-load-balancer-type: nlb
-```
-
-Other useful annotations for AWS are:
-
-```
-service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
-service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*'
-```
-
-A complete list can be found
-[here](https://gist.github.com/mgoodness/1a2926f3b02d8e8149c224d25cc57dc1).
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/prometheus-grafana.md b/app/kubernetes-ingress-controller/1.3.x/guides/prometheus-grafana.md
deleted file mode 100644
index bf6d82958273..000000000000
--- a/app/kubernetes-ingress-controller/1.3.x/guides/prometheus-grafana.md
+++ /dev/null
@@ -1,338 +0,0 @@
----
-title: Integrate the Kong Ingress Controller with Prometheus/Grafana
----
-
-The {{site.kic_product_name}} can give you visibility not only into how Kong is
-performing, but also into
-how the services in your Kubernetes cluster are responding to the
-inbound traffic.
-
-This how-to guide walks through the steps of configuring Kong and
-Prometheus to collect metrics from the {{site.kic_product_name}}.
-
-> Note: This guide was originally posted on Kong Inc's blog:
-[https://konghq.com/blog/observability-kubernetes-kong/](https://konghq.com/blog/observability-kubernetes-kong)
-
-## Prerequisites
-
-You’ll need a few things before we can start:
-
-- **Kubernetes cluster**: You can use Minikube or a GKE cluster for the
-  purpose of this tutorial. We are running a GKE Kubernetes cluster v1.12.x.
-- **Helm**: We will be using [Helm](https://helm.sh/)
-  to install all of our components.
-  Tiller should be installed on your k8s cluster and
-  the Helm CLI should be available on your workstation.
-  You can follow Helm’s quickstart guide to set up Helm.
-
-Once you have Kubernetes and Helm set up, please proceed.
-
-Caution: Settings here are tweaked to keep this guide simple.
-These settings are not meant for production usage.
-
-## Install Prometheus and Grafana
-
-If you already have Prometheus and Grafana installed on your Kubernetes cluster,
-you can skip these steps.
- -### Prometheus - -First, we will install Prometheus with a -scrape interval of 10 seconds to have fine-grained data points for all metrics. -We’ll install both Prometheus and Grafana in a dedicated `monitoring` namespace. - -To install Prometheus, execute the following: - -```bash -$ kubectl create namespace monitoring -$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -$ helm install prometheus prometheus-community/prometheus --namespace monitoring --values https://bit.ly/2RgzDtg --version 11.0.3 -``` - -### Grafana - -Grafana is installed with the following values for its Helm chart -(see comments for explanation): - -```yaml -persistence: - enabled: true # enable persistence using Persistent Volumes -datasources: - datasources.yaml: - apiVersion: 1 - Datasources: # configure Grafana to read metrics from Prometheus - - name: Prometheus - type: prometheus - url: http://prometheus-server # Since Prometheus is deployed in - access: proxy # same namespace, this resolves - # to the Prometheus Server we installed previous - isDefault: true # The default data source is Prometheus - -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'default' # Configure a dashboard provider file to - orgId: 1 # put Kong dashboard into. - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/default -dashboards: - default: - kong-dash: - gnetId: 7424 # Install the following Grafana dashboard in the - revision: 5 # instance: https://grafana.com/dashboards/7424 - datasource: Prometheus -``` - -To install Grafana, execute the following: - -```bash -$ helm repo add grafana https://grafana.github.io/helm-charts -$ helm install grafana grafana/grafana --namespace monitoring --values http://bit.ly/2FuFVfV --version 5.0.8 -``` - -## Install Kong - -We will use Kong's Helm chart to install Kong -but you can also use plain manifests for this purpose. - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -$ kubectl create namespace kong -$ helm install mykong kong/kong --namespace kong --values https://bit.ly/2UAv0ZE -``` - -### Enable Prometheus plugin in Kong - -We will enable the Promtheus plugin in Kong at the global level, meaning -each request that flows into the Kubernetes cluster gets tracked in Prometheus: - -```bash -$ echo 'apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: prometheus - annotations: - kubernetes.io/ingress.class: kong - labels: - global: "true" -plugin: prometheus -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/prometheus created -``` - -## Set Up Port Forwards - -Now, we will gain access to the components we just deployed. -In a production environment, you would have a Kubernetes Service with -an external IP or load balancer, which would allow you to access -Prometheus, Grafana, and Kong. -For demo purposes, we will set up port-forwarding using kubectl to get access. -It is not advisable to do this in production. 
- -Open a new terminal and execute the following commands: - -```bash -POD_NAME=$(kubectl get pods --namespace monitoring -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace monitoring port-forward $POD_NAME 9090 & - -# You can access Prometheus in your browser at localhost:9090 - -POD_NAME=$(kubectl get pods --namespace monitoring -l "app.kubernetes.io/instance=grafana" -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace monitoring port-forward $POD_NAME 3000 & - -# You can access Grafana in your browser at localhost:3000 - -POD_NAME=$(kubectl get pods --namespace kong -o jsonpath="{.items[0].metadata.name}") -kubectl --namespace kong port-forward $POD_NAME 8000 & - -# Kong proxy port is now your localhost 8000 port -# We are using plain-text HTTP proxy for this purpose of -# demo. -# You can also use the LoadBalancer IP address and not set up this -# port-forwarding if you are running in a cloud environment. -``` - -## Access Grafana Dashboard - -To access Grafana, you need to get the password for the admin user. - -Execute the following to read the password and take note of it: - -```bash -kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo -``` - -Now, browse to [http://localhost:3000](http://localhost:3000) and -fill in username as “admin” and password as what you just noted above. -You should be logged in to Grafana and Kong’s Grafana Dashboard -should already be installed for you. - -## Setup Services - -We have all the components for monitoring installed, -we will now spin up some services for demo purposes and setup Ingress -routing for them. - -### Install Services - -We will set up three services: billing, invoice, and comments. -Execute the following to spin these services up: - -```bash -kubectl apply -f https://gist.githubusercontent.com/hbagdi/2d8ef66fe22cb99e1514f410f992268d/raw/a03d789b70c46ccd0b99d9f1ed838dc21419fc33/multiple-services.yaml -``` - -### Install Ingress for the Services - -Next, once the services are up and running, we will create Ingress -routing rules in Kubernetes. -This will configure Kong to proxy traffic destined for these services correctly. - -Execute the following: - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: sample-ingresses - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /billing - backend: - serviceName: billing - servicePort: 80 - - path: /comments - backend: - serviceName: comments - servicePort: 80 - - path: /invoice - backend: - serviceName: invoice - servicePort: 80 -' | kubectl apply -f - -``` - -## Let’s Create Some Traffic - -We’re done configuring our services and proxies. -Time to see if our setup works. -Execute the following in a new terminal: - -```bash -while true; -do - curl http://localhost:8000/billing/status/200 - curl http://localhost:8000/billing/status/501 - curl http://localhost:8000/invoice/status/201 - curl http://localhost:8000/invoice/status/404 - curl http://localhost:8000/comments/status/200 - curl http://localhost:8000/comments/status/200 - sleep 0.01 -done -``` - -Since we have already enabled Prometheus plugin in Kong to -collect metrics for requests proxied via Kong, -we should see metrics coming through in the Grafana dashboard. - -You should be able to see metrics related to the traffic flowing -through our services. 
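-
-If you want to confirm from the command line that the counters are moving
-before opening Grafana, you can also query Prometheus directly over the
-port-forward set up earlier. A small sketch using the Prometheus HTTP API and
-the `kong_http_status` series (described in more detail below):
-
-```bash
-# Request rate per Kong service over the last minute
-$ curl -s 'http://localhost:9090/api/v1/query' \
-  --data-urlencode 'query=sum(rate(kong_http_status[1m])) by (service)'
-```
-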
-Try tweaking the above script to send different traffic patterns -and see how the metrics change. -The upstream services are httpbin instances, meaning you can use -a variety of endpoints to shape your traffic. - -## Metrics collected - -### Request Latencies of Various Services - -![Request latencies](/assets/images/products/kubernetes-ingress-controller/request-latencies.png) - -Kong collects latency data of how long your services take to respond to -requests. One can use this data to alert the on-call engineer if the latency -goes beyond a certain threshold. For example, let’s say you have an SLA -that your APIs will respond with latency of less than 20 millisecond -for 95% of the requests. -You could configure Prometheus to alert based on the following query: - -```text -histogram_quantile(0.95, sum(rate(kong_latency_bucket{type="request"}[1m])) by (le,service)) > 20 -``` - -The query calculates the 95th percentile of the the total request -latency (or duration) for all of your services and alerts you if it is more -than 20 milliseconds. -The “type” label in this query is “request”, which tracks the latency -added by Kong and the service. -You can switch this to “upstream” to track latency added by the service only. -Prometheus is highly flexible and well documented, so we won’t go into -details of setting up alerts here, but you’ll be able to find them -in the Prometheus documentation. - -### Kong Proxy Latency - -![Proxy latencies](/assets/images/products/kubernetes-ingress-controller/proxy-latencies.png) - -Kong also collects metrics about its performance. -The following query is similar to the previous one but gives -us insight into latency added by Kong: - -```text -histogram_quantile(0.90, sum(rate(kong_latency_bucket{type="kong"}[1m])) by (le,service)) > 2 -``` - -### Error Rates - -![Error rates](/assets/images/products/kubernetes-ingress-controller/error-rates.png) - -Another important metric to track is the rate of errors and requests -your services are serving. -The time series `kong_http_status` collects HTTP status code metrics -for each service. - -This metric can help you track the rate of errors for each of your service: - -```text -sum(rate(kong_http_status{code=~"5[0-9]{2}"}[1m])) by (service) -``` - -You can also calculate the percentage of requests in any duration -that are errors. Try to come up with a query to derive that result. - -Please note that all HTTP status codes are indexed, meaning you could use -the data to learn about your typical traffic pattern and identify problems. -For example, a sudden rise in 404 response codes could be indicative -of client codes requesting an endpoint that was removed in a recent deploy. - -### Request Rate and Bandwidth - -![Request rates](/assets/images/products/kubernetes-ingress-controller/request-rate.png) - -One can derive the total request rate for each of your services or -across your Kubernetes cluster using the `kong_http_status` time series. - -![Bandwidth](/assets/images/products/kubernetes-ingress-controller/bandwidth.png) - -Another metric that Kong keeps track of is the amount of -network bandwidth (`kong_bandwidth`) being consumed. -This gives you an estimate of how request/response sizes -correlate with other behaviors in your infrastructure. - -You now have metrics for the services running inside your Kubernetes cluster -and have much more visibility into your applications, without making -any modifications in your services. 
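-
-For example, the latency query above could be wired into a Prometheus
-alerting rule. The group name, alert name, and threshold below are only
-illustrative; in this setup the rule would typically be added through the
-values file of the Prometheus Helm chart:
-
-```yaml
-groups:
-- name: kong.rules
-  rules:
-  - alert: HighRequestLatency
-    # p95 of request latency per service, in milliseconds
-    expr: histogram_quantile(0.95, sum(rate(kong_latency_bucket{type="request"}[1m])) by (le,service)) > 20
-    for: 5m
-    labels:
-      severity: warning
-    annotations:
-      summary: "p95 request latency is above 20ms"
-```
-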
-You can use Alertmanager or Grafana to configure alerts based on
-the metrics observed and your SLOs.
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/redis-rate-limiting.md b/app/kubernetes-ingress-controller/1.3.x/guides/redis-rate-limiting.md
deleted file mode 100644
index 64f13251f006..000000000000
--- a/app/kubernetes-ingress-controller/1.3.x/guides/redis-rate-limiting.md
+++ /dev/null
@@ -1,222 +0,0 @@
----
-title: Using Redis for rate-limiting
----
-
-Kong can rate-limit your traffic without any external dependency.
-In that case, Kong stores the request counters in memory
-and each Kong node applies the rate-limiting policy independently;
-no counter information is synchronized across nodes.
-If Redis is available in your cluster, Kong
-can take advantage of it to synchronize the rate-limit counters
-across multiple Kong nodes and enforce the limit consistently.
-
-This guide walks through the steps of using Redis for rate-limiting in
-a multi-node Kong deployment.
-
-## Installation
-
-Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install
-the {{site.kic_product_name}} on your Kubernetes cluster.
-
-## Testing Connectivity to Kong
-
-This guide assumes that the `PROXY_IP` environment variable is
-set to contain the IP address or URL pointing to Kong.
-Please follow one of the
-[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable.
-
-If everything is set up correctly, making a request to Kong should return
-HTTP 404 Not Found.
-
-```bash
-$ curl -i $PROXY_IP
-HTTP/1.1 404 Not Found
-Date: Fri, 21 Jun 2019 17:01:07 GMT
-Content-Type: application/json; charset=utf-8
-Connection: keep-alive
-Content-Length: 48
-Server: kong/1.2.1
-
-{"message":"no Route matched with those values"}
-```
-
-This is expected as Kong does not yet know how to proxy the request.
-
-## Setup a Sample Service
-
-For the purpose of this guide, we will set up an [httpbin](https://httpbin.org)
-service in the cluster and proxy it.
- -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Set up rate-limiting - -We will start by creating a global rate-limiting policy: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit created -``` - -Here we are configuring the {{site.kic_product_name}} to rate-limit traffic from -any client to 5 requests per minute, and we are applying this policy in a -global sense, meaning the rate-limit will apply across all services. - -You can set this up for a specific Ingress or a specific service as well, -please follow [using KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/) -guide on steps for doing that. - -Next, test the rate-limiting policy by executing the following command -multiple times and observe the rate-limit headers in the response: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -As there is a single Kong instance running, Kong correctly imposes -the rate-limit and you can make only 5 requests in a minute. - -## Scale the controller to multiple pods - -Now, let's scale up the {{site.kic_product_name}} deployment to 3 pods, for -scalability and redundancy: - -```bash -$ kubectl scale --replicas 3 -n kong deployment ingress-kong -deployment.extensions/ingress-kong scaled -``` - -It will take a couple minutes for the new pods to start up. -Once the new pods are up and running, test the rate-limiting policy by -executing the following command and observing the rate-limit headers: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -You will observe that the rate-limit is not consistent anymore -and you can make more than 5 requests in a minute. - -To understand this behavior, we need to understand how we have configured Kong. -In the current policy, each Kong node is tracking a rate-limit in-memory -and it will allow 5 requests to go through for a client. -There is no synchronization of the rate-limit information across Kong nodes. -In use-cases where rate-limiting is used as a protection mechanism and to -avoid over-loading your services, each Kong node tracking its own counter -for requests is good enough as a malicious user will hit rate-limits on all -nodes eventually. 
-Or if the load-balancer in-front of Kong is performing some -sort of deterministic hashing of requests such that the same Kong node always -receives the requests from a client, then we won't have this problem at all. - -In some cases, a synchronization of information that each Kong node maintains -in-memory is needed. For that purpose, Redis can be used. -Let's go ahead and set this up next. - -## Deploy Redis to your Kubernetes cluster - -First, we will deploy redis in our Kubernetes cluster: - -```bash -$ kubectl apply -n kong -f https://bit.ly/k8s-redis -deployment.apps/redis created -service/redis created -``` - -Once this is deployed, let's update our KongClusterPlugin configuration to use -Redis as a datastore rather than each Kong node storing the counter information -in-memory: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - policy: redis - redis_host: redis -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit configured -``` - -Notice, how the `policy` is now set to `redis` and we have configured Kong -to talk to the `redis` server available at `redis` DNS name, which is the -Redis node we deployed earlier. - -## Test it - -Now, if you go ahead and execute the following commands, you should be able -to make only 5 requests in a minute: - -```bash -$ curl -I $PROXY_IP/foo/headers -``` - -This guide shows how to use Redis as a data-store for rate-limiting plugin, -but this can be used for other plugins which support Redis as a data-store -like proxy-cache. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/setting-up-custom-plugins.md b/app/kubernetes-ingress-controller/1.3.x/guides/setting-up-custom-plugins.md deleted file mode 100644 index bb56a4b3b174..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/setting-up-custom-plugins.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: Setting up custom plugin in Kubernetes environment ---- - -This guide goes through steps on installing a custom plugin -in Kong without using a Docker build. - -## Prepare a directory with plugin code - -First, we need to create either a ConfigMap or a Secret with -the plugin code inside it. -If you would like to install a plugin which is available as -a rock from Luarocks, then you need to download it, unzip it and create a -ConfigMap from all the Lua files of the plugin. - -We are going to setup a dummy plugin next. -If you already have a real plugin, you can skip this step. - -```shell -$ mkdir myheader && cd myheader -$ echo 'local MyHeader = {} - -MyHeader.PRIORITY = 1000 - -function MyHeader:header_filter(conf) - -- do custom logic here - kong.response.set_header("myheader", conf.header_value) -end - -return MyHeader -' > handler.lua - -$ echo 'return { - name = "myheader", - fields = { - { config = { - type = "record", - fields = { - { header_value = { type = "string", default = "roar", }, }, - }, - }, }, - } -} -' > schema.lua -``` - -Once we have our plugin code available in a directory, -the directory should look something like this: - -```shell -$ tree myheader -myheader -├── handler.lua -└── schema.lua - -0 directories, 2 files -``` - -You might have more files inside the directory as well. - -## Create a ConfigMap or Secret with the plugin code - -Next, we are going to create a ConfigMap or Secret based on the plugin -code. 
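-
-If your plugin comes from LuaRocks rather than the dummy plugin above, you
-first need its Lua files in a local directory. The following is only a rough
-sketch with a hypothetical rock name (`kong-plugin-something`); the exact
-LuaRocks commands and unpacked paths can vary between versions:
-
-```bash
-# Fetch the rock and unpack its sources into a local directory.
-luarocks download kong-plugin-something
-luarocks unpack kong-plugin-something-*.rock
-
-# The handler.lua, schema.lua, and any other Lua files you need for the
-# ConfigMap live somewhere under the unpacked source tree.
-find . -name '*.lua'
-```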
- -Please ensure that this is created in the same namespace as the one -in which Kong is going to be installed. - -```shell -# using ConfigMap; replace `myheader` with the name of your plugin -$ kubectl create configmap kong-plugin-myheader --from-file=myheader -n kong -configmap/kong-plugin-myheader created - -# OR using Secret -$ kubectl create secret generic -n kong kong-plugin-myheader --from-file=myheader -secret/kong-plugin-myheader created -``` - -## Modify configuration - -Next, we need to update Kong's Deployment to load our custom plugin. - -Based on your installation method, this step will differ slightly. -The next section explains what changes are necessary. - -### YAML - -The following patch is necessary to load the plugin. -Notable changes: -- The plugin code is mounted into the pod via `volumeMounts` and `volumes` - configuration property. -- `KONG_PLUGINS` environment variable is set to include the custom plugin - along with all the plugins that come in Kong by default. -- `KONG_LUA_PACKAGE_PATH` environment variable directs Kong to look - for plugins in the directory where we are mounting them. - -If you have multiple plugins, simply mount multiple -ConfigMaps and include the plugin name in the `KONG_PLUGINS` -environment variable. - -> Please note that if your plugin code involves database - migration then you need to include the below patch to pod definition of your - migration Job as well. - -Please note that the below is not a complete definition of -the Deployment but merely a strategic patch which can be applied to -an existing Deployment. - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ingress-kong - namespace: kong -spec: - template: - spec: - containers: - - name: proxy - env: - - name: KONG_PLUGINS - value: bundled,myheader - - name: KONG_LUA_PACKAGE_PATH - value: "/opt/?.lua;;" - volumeMounts: - - name: kong-plugin-myheader - mountPath: /opt/kong/plugins/myheader - volumes: - - name: kong-plugin-myheader - configMap: - name: kong-plugin-myheader -``` - -### Helm chart - -With Helm, this is as simple as adding the following values to -your `values.yaml` file: - -```yaml -# values.yaml -plugins: - configMaps: # change this to 'secrets' if you created a secret - - name: kong-plugin-myheader - pluginName: myheader -``` - -The chart automatically configures all the environment variables based on the -plugins you inject. - -Please ensure that you add in other configuration values -you might need for your installation to work. - -### Deploy - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Once, you have all the pieces in place, you are ready -to deploy the {{site.kic_product_name}}: - -```shell -# using YAML or kustomize -kustomize build github.com/hbagdi/yaml/kong/kong-custom-plugin | kubectl apply -f - - -# or helm -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 2 -$ helm install kong/kong --values values.yaml - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false --values values.yaml -``` - -Once you have setup Kong with the custom plugin installed, you can use it -like any other plugin. 
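-
-Before configuring the plugin, you can optionally confirm that the files and
-environment variables actually landed in the proxy container. This is just a
-sanity check; it reuses the pod lookup pattern from the other guides, and the
-container name and mount path match the Deployment patch above:
-
-```bash
-POD_NAME=$(kubectl get pods --namespace kong -o jsonpath="{.items[0].metadata.name}")
-
-# The custom plugin should be listed in KONG_PLUGINS ...
-kubectl exec -n kong $POD_NAME -c proxy -- env | grep KONG_PLUGINS
-
-# ... and its Lua files should be mounted at the expected path.
-kubectl exec -n kong $POD_NAME -c proxy -- ls /opt/kong/plugins/myheader
-```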
-
-First, create a `KongPlugin` custom resource:
-
-```bash
-echo '
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: my-custom-plugin
-config:
-  header_value: "my first plugin"
-plugin: myheader
-' | kubectl apply -f -
-```
-
-and then annotate an Ingress or Service resource to tell
-Kong when to execute the plugin:
-
-```yaml
-konghq.com/plugins: my-custom-plugin
-```
-
-Once Kong is up and running, configure your
-custom plugin via the [KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/).
-
-
-### Plugins in other languages
-
-When deploying custom plugins in other languages, especially Golang, the built binary can be larger than
-the size limit of a ConfigMap. In such cases, consider using an init container to pull large binaries from
-remotes like S3 buckets, or build a custom image that includes plugin runtimes and the plugin itself.
-
-To read more about building a custom image, see
-[use external plugins in container and Kubernetes](/gateway/latest/reference/external-plugins/#use-external-plugins-in-container-and-kubernetes).
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/upstream-mtls.md b/app/kubernetes-ingress-controller/1.3.x/guides/upstream-mtls.md
deleted file mode 100644
index 7a6e50c40656..000000000000
--- a/app/kubernetes-ingress-controller/1.3.x/guides/upstream-mtls.md
+++ /dev/null
@@ -1,104 +0,0 @@
----
-title: Using mTLS with Kong
----
-
-This guide walks through how to set up Kong to perform mutual TLS
-authentication with an upstream service.
-
-> Please note that this guide covers mTLS configuration between
-Kong and a Service, not between Kong and a client or consumer.
-
-## What is mTLS?
-
-Mutual authentication refers to two-way authentication, where both the
-client and the server can authenticate themselves to the other party.
-
-With mutual TLS authentication, client and server both present TLS
-certificates to the other party (and can prove their identity using their
-private key) during the TLS handshake. Each side verifies the other's
-certificate using its trusted CAs.
-
-## mTLS with Kong
-
-Kong 1.3 and above support mutual TLS authentication between Kong and the
-upstream service.
-
-Let's take a look at how to configure it.
-
-## Configure Kong to verify upstream server certificate
-
-Kong, by default, does not verify the certificate presented by the upstream
-service.
-
-To enforce certificate verification, you need to configure the following
-environment variables on Kong's container in your deployment:
-
-```
-KONG_NGINX_PROXY_PROXY_SSL_VERIFY="on"
-KONG_NGINX_PROXY_PROXY_SSL_VERIFY_DEPTH="3"
-KONG_NGINX_PROXY_PROXY_SSL_TRUSTED_CERTIFICATE="/path/to/ca_certs.pem"
-```
-
-These translate to
-[NGINX directives](https://nginx.org/en/docs/http/ngx_http_proxy_module.html)
-that configure NGINX to verify certificates.
-
-Please make sure that the trusted certificates are correctly
-mounted into Kong's container and that the certificate path is correctly
-reflected in the environment variable above.
-
-## Configure Kong to present its certificate to the upstream server
-
-In the above section, we achieved one side of mutual authentication,
-where Kong has been configured to verify the identity of the upstream server.
-
-In this section, we will configure Kong to present its identity to the
-upstream server.
-
-To configure this, you have two options, depending on your use case.
-If you would like Kong to present its client certificate to each and every -service that it talks to, you can configure the client certificate -at the global level using Nginx directives. -If you would like to configure a different certificate for -each service that Kong talks to or want to configure Kong to present a -client certificate only to a subset of all services that it is configured to -communicate with, then you can configure that using an annotation on -the Kubernetes Service resource. - -### Global Nginx directive - -You need to configure two Nginx directives for this purpose: -- [`proxy_ssl_certificate`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate) -- [`proxy_ssl_certificate_key`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_key) - -You can mount the certificate and key pair using secrets into the Kong pod -and then set the following two environment variables to set the above two -directives: - -``` -KONG_NGINX_PROXY_PROXY_SSL_CERTIFICATE="/path/to/client_cert.pem" -KONG_NGINX_PROXY_PROXY_SSL_CERTIFICATE_KEY="/path/to/key.pem" -``` - -Once configured, Kong will present its client certificate to every upstream -server that it talks to. - -### Per service annotation - -To configure a different client certificate for each service or only for a -subset of services, you can do so using the -[`konghq.com/client-cert`](/kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcom/client-cert) -annotation. - -To use the annotation, you first need to create a TLS secret with the -client certificate and key in Kubernetes. -The secret should be created in the same namespace as your Kubernetes -Service to which Kong should authenticate itself. - -Once the secret is in place, add the follow annotation on the service: - -``` -konghq.com/client-cert: -``` - -Kong will then use the TLS key-pair to authenticate itself against that service. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-consumer-credential-resource.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-consumer-credential-resource.md deleted file mode 100644 index 8c1acc30e7f8..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-consumer-credential-resource.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Provisioning Consumers and Credentials ---- - -This guide walks through how to use the KongConsumer custom -resource and use Secret resources to associate credentials with those -consumers. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
- -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Add authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. - -Let's add a KongPlugin resource to protect the API: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: httpbin-auth -plugin: key-auth -" | kubectl apply -f - -kongplugin.configuration.konghq.com/httpbin-auth created -``` - -Now, associate this plugin with the previous Ingress rule we created -using the `konghq.com/plugins` annotation: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - konghq.com/plugins: httpbin-auth - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -``` - -Any request matching the proxying rules defined in the `demo` ingress will -now require a valid API key: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 19:30:33 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -Server: kong/1.2.1 - -{"message":"No API key found in request"} -``` - -As you can see above, Kong returns back a `401 Unauthorized` because -we didn't provide an API key. - -## Provision a Consumer - -Let's create a KongConsumer resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, let's provision an API-key associated with -this consumer so that we can pass the authentication imposed by Kong: - -Next, we will create a [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) -resource with an API-key inside it: - -```bash -$ kubectl create secret generic harry-apikey \ - --from-literal=kongCredType=key-auth \ - --from-literal=key=my-sooper-secret-key -secret/harry-apikey created -``` - -The type of credential is specified via `kongCredType`. -You can create the Secret using any other method as well. - -Since we are using the Secret resource, -Kubernetes will encrypt and store this API-key for us. 
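-
-The guide above uses `kubectl create secret`; if you prefer a declarative
-manifest, an equivalent Secret looks roughly like this (same names and
-values as above):
-
-```bash
-$ echo '
-apiVersion: v1
-kind: Secret
-metadata:
-  name: harry-apikey
-type: Opaque
-stringData:
-  kongCredType: key-auth
-  key: my-sooper-secret-key
-' | kubectl apply -f -
-```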
- -Next, we will associate this API-key with the consumer we created previously. - -Please note that we are not re-creating the KongConsumer resource but -only updating it to add the `credentials` array: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry -credentials: -- harry-apikey" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry configured -``` - -## Use the credential - -Now, use the credential to pass authentication: - -```bash -$ curl -i -H 'apikey: my-sooper-secret-key' $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:34:44 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -In this guide, we learned how to leverage an authentication plugin in Kong -and provision credentials. This enables you to offload authentication into -your Ingress layer and keeps the application logic simple. - -All other authentication plugins bundled with Kong work in this -way and can be used to quickly add an authentication layer on top of -your microservices. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-external-service.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-external-service.md deleted file mode 100644 index dc867422bd35..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-external-service.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Expose an external application ---- - -This example shows how we can expose a service located outside the Kubernetes cluster using an Ingress. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Create a Kubernetes service - -First we need to create a Kubernetes Service [type=ExternalName][0] using the hostname of the application we want to expose. 
- -```bash -echo " -kind: Service -apiVersion: v1 -metadata: - name: proxy-to-httpbin -spec: - ports: - - protocol: TCP - port: 80 - type: ExternalName - externalName: httpbin.org -" | kubectl create -f - -``` - -## Create an Ingress to expose the service at the path `/foo` - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: proxy-from-k8s-to-httpbin - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: proxy-to-httpbin - servicePort: 80 -' | kubectl create -f - -``` - -## Test the service - -```bash -$ curl -i $PROXY_IP/foo -H "Host: httpbin.org" -``` - -[0]: https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-ingress-with-grpc.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-ingress-with-grpc.md deleted file mode 100644 index a001dd404143..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-ingress-with-grpc.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Using Ingress with gRPC ---- - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Prerequisite - -To make `gRPC` requests, you need a client that can invoke gRPC requests. -In this guide, we use -[`grpcurl`](https://github.com/fullstorydev/grpcurl#installation). -Ensure that you have it installed on your local system. - -## Test connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you haven't done so, follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure the `PROXY_IP` environment variable. - -If everything is set up correctly, Kong returns -`HTTP 404 Not Found` since the system does not know yet how to proxy the request. - -```bash -curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -#### Run gRPC - -1. Add a gRPC deployment and service: - - ```bash - kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/sample-apps/grpc.yaml - service/grpcbin created - deployment.apps/grpcbin created - ``` -2. Create a demo gRPC ingress rule: - - ```bash - echo "apiVersion: networking.k8s.io/v1 - kind: Ingress - metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong - spec: - rules: - - http: - paths: - - path: / - backend: - serviceName: grpcbin - servicePort: 9001" | kubectl apply -f - - ingress.extensions/demo created - ``` - -3. Next, we need to update the Ingress rule to specify gRPC as the protocol. -By default, all routes are assumed to be either HTTP or HTTPS. This annotation -informs Kong that this route is a gRPC(s) route and not a plain HTTP route: - - ```bash - kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"grpc,grpcs"}}}' - ``` - -4. We also update the upstream protocol to be `grpcs`. -Similar to routes, Kong assumes that services are HTTP-based by default. 
-With this annotation, we configure Kong to use gRPCs protocol when it -talks to the upstream service: - - ```bash - kubectl patch svc grpcbin -p '{"metadata":{"annotations":{"konghq.com/protocol":"grpcs"}}}' - ``` - -5. You should be able to run a request over `gRPC`: - - ```bash - grpcurl -v -d '{"greeting": "Kong Hello world!"}' -insecure $PROXY_IP:443 hello.HelloService.SayHello - ``` diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-kong-with-knative.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-kong-with-knative.md deleted file mode 100644 index 7969d3312584..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-kong-with-knative.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: Using Kong with Knative ---- - -The {{site.kic_product_name}} supports managing ingress traffic for -serverless workloads managed via Knative. - -In this guide, we will learn how to use Kong with Knative services and -configure plugins for Knative services. - - -## Pre-requisite - -This guide will be easier to follow if you have access to a Kubernetes -cluster that is running in the cloud rather than Minikube or any other -local environment. The guide requires access to DNS and a public IP -address or DNS name will certainly keep things simpler and easy for you. - -## Install Knative - -If you don't have knative installed, you need to install Knative: - -``` -kubectl apply --filename https://github.com/knative/serving/releases/download/v0.18.0/serving-crds.yaml -kubectl apply --filename https://github.com/knative/serving/releases/download/v0.18.0/serving-core.yaml -``` - -This will install the resources that are required to run Knative. - -## Install Kong - -Next, install the {{site.kic_product_name}}: - -``` -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -``` - -You can choose to install a different flavor, like using a database, -or using an Enterprise installation instead of Open-Source. You can also -use Helm installation method if that works for you. - -Once Kong is installed, -you should note down the IP address or public CNAME of -`kong-proxy` service. - -In the current case case, - -```shell -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.248.154 35.247.39.83 80:30345/TCP,443:31872/TCP 53m -``` - -Take a note of the above IP address "`35.247.39.83`". This will be different -for your installation. - -## Configure Knative to use Kong for Ingress - -### Ingress class - -Next, we will configure Knative to use `kong` as the Ingress class: - -``` -$ kubectl patch configmap/config-network \ - --namespace knative-serving \ - --type merge \ - --patch '{"data":{"ingress.class":"kong"}}' -``` - -## Setup Knative domain - -As the final step, we need to configure Knative's base domain at which -our services will be accessible. - -We override the default ConfigMap with the DNS name of `${KONG_IP}.xip.io`. -This will be different for you: - -``` -$ echo ' -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-domain - namespace: knative-serving - labels: - serving.knative.dev/release: v0.13.0 -data: - 35.247.39.83.xip.io: "" -' | kubectl apply -f - -configmap/config-domain configured -``` - -Once this is done, the setup is complete and we can move onto using Knative -and Kong. 
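-
-Rather than copying the address by hand, you can capture it in an environment
-variable and reuse it in the rest of this guide. This assumes a LoadBalancer
-that reports an IP address; on providers that report a hostname instead, use
-`.hostname` in the jsonpath below:
-
-```bash
-# Store the external address of the kong-proxy service for later use.
-$ export KONG_IP=$(kubectl get service kong-proxy -n kong \
-    -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
-$ echo $KONG_IP
-35.247.39.83
-```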
- -## Test connectivity to Kong - -Send a request to the above domain that we have configured: - -```bash -curl -i http://35.247.39.83.xip.io/ -HTTP/1.1 404 Not Found -Date: Wed, 11 Mar 2020 00:18:49 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -X-Kong-Response-Latency: 1 -Server: kong/1.4.3 - -{"message":"no Route matched with those values"} -``` - -The 404 response is expected since we have not configured any services -in Knative yet. - -## Install a Knative Service - -Let's install our first Knative service: - -``` -$ echo " -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: helloworld-go - namespace: default -spec: - template: - spec: - containers: - - image: gcr.io/knative-samples/helloworld-go - env: - - name: TARGET - value: Go Sample v1 -" | kubectl apply -f - -``` - -It can take a couple of minutes for everything to get configured but -eventually, you will see the URL of the Service. -Let's make the call to the URL: - -```shell -$ curl -v http://helloworld-go.default..xip.io -HTTP/1.1 200 OK -Content-Type: text/plain; charset=utf-8 -Content-Length: 20 -Connection: keep-alive -Date: Tue, 10 Mar 2020 23:45:14 GMT -X-Kong-Upstream-Latency: 2723 -X-Kong-Proxy-Latency: 0 -Via: kong/1.4.3 - -Hello Go Sample v1! -``` - -The request is served by Knative and from the response HTTP headers, -we can tell that the request was proxied by Kong. - -The first request will also take longer to complete as Knative will spin -up a new Pod to service the request. -We can see how Kong observed this latency and recorded it in the -`X-Kong-Upstream-Latency` header. -If you perform subsequent requests, -they should complete much faster. - -## Plugins for knative services - -Let's now execute a plugin for our new Knative service. - -First, let's create a KongPlugin resource: - -```shell -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: add-response-header -config: - add: - headers: - - "demo: injected-by-kong -plugin: response-transformer -" | kubectl apply -f - -kongplugin.configuration.konghq.com/add-response-header created -``` - -Next, we will update the Knative service created before and add in -annotation in the template: - -```shell -$ echo " -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: helloworld-go - namespace: default -spec: - template: - metadata: - annotations: - konghq.com/plugins: add-response-header - spec: - containers: - - image: gcr.io/knative-samples/helloworld-go - env: - - name: TARGET - value: Go Sample v1 -" | kubectl apply -f - -service.serving.knative.dev/helloworld-go configured -``` - -Please note that the annotation `konghq.com/plugins` is -not added to the Service definition -itself but to the `spec.template.metadata.annotations`. - -Let's make the request again: - -```shell -$ curl -i http://helloworld-go.default.35.247.39.83.xip.io/ -HTTP/1.1 200 OK -Content-Type: text/plain; charset=utf-8 -Content-Length: 20 -Connection: keep-alive -Date: Wed, 11 Mar 2020 00:35:07 GMT -demo: injected-by-kong -X-Kong-Upstream-Latency: 2455 -X-Kong-Proxy-Latency: 1 -Via: kong/1.4.3 - -Hello Go Sample v1! -``` - -As we can see, the response has the `demo` header injected. - -This guide demonstrates the power of using Kong and Knative together. -Checkout other plugins and try them out with multiple Knative services. -The possibilities are endless! 
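-
-As a next experiment, you could layer a second plugin onto the same Knative
-service. The sketch below adds the rate-limiting plugin used elsewhere in
-these guides and lists both plugins, comma-separated, in the
-`konghq.com/plugins` annotation, which accepts multiple plugin names:
-
-```bash
-$ echo "
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: helloworld-rate-limit
-config:
-  minute: 5
-  policy: local
-plugin: rate-limiting
-" | kubectl apply -f -
-
-$ echo "
-apiVersion: serving.knative.dev/v1
-kind: Service
-metadata:
-  name: helloworld-go
-  namespace: default
-spec:
-  template:
-    metadata:
-      annotations:
-        konghq.com/plugins: add-response-header,helloworld-rate-limit
-    spec:
-      containers:
-      - image: gcr.io/knative-samples/helloworld-go
-        env:
-        - name: TARGET
-          value: Go Sample v1
-" | kubectl apply -f -
-```
-
-Subsequent responses should then carry both the injected `demo` header and
-the `X-RateLimit-*` headers.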
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-kongclusterplugin-resource.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-kongclusterplugin-resource.md deleted file mode 100644 index 141f2043f806..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-kongclusterplugin-resource.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -title: Using KongClusterPlugin resource ---- - -In this guide, we will learn how to use KongClusterPlugin resource to configure -plugins in Kong. -The guide will cover configuring a plugin for services across different -namespaces. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Installing sample services - -We will start by installing two services, -an echo service and an httpbin service in their corresponding namespaces. - -```bash -$ kubectl create namespace httpbin -namespace/httpbin created -$ kubectl apply -n httpbin -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -```bash -$ kubectl create namespace echo -namespace/echo created -$ kubectl apply -n echo -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress rules - -Let's expose these services outside the Kubernetes cluster -by defining Ingress rules. 
- -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: httpbin-app - namespace: httpbin - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created - -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: echo-app - namespace: echo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test these endpoints: - -```bash -# access httpbin service -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:38:00 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -# access echo service -$ curl -i $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 -<-- clipped -- > -``` - -## Create KongClusterPlugin resource - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: add-response-header - annotations: - kubernetes.io/ingress.class: kong -config: - add: - headers: - - "demo: injected-by-kong" -plugin: response-transformer -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/add-response-header created -``` - -Note how the resource is created at cluster-level and not in any specific -namespace: - -```bash -$ kubectl get kongclusterplugins -NAME PLUGIN-TYPE AGE -add-response-header response-transformer 4s -``` - -If you send requests to `PROXY_IP` now, you will see that the header is not -injected in the responses. The reason being that we have created a -resource but we have not told Kong when to execute the plugin. - -## Configuring plugins on Ingress resources - -We will associate the `KongClusterPlugin` resource with the two Ingress resources -that we previously created: - -```bash -$ kubectl patch ingress -n httpbin httpbin-app -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/httpbin-app patched - -$ kubectl patch ingress -n echo echo-app -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/echo-app patched -``` - -Here, we are asking the {{site.kic_product_name}} to execute the response-transformer -plugin whenever a request matching any of the above two Ingress rules is -processed. 
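-
-Before testing, you can optionally confirm that the annotation was applied to
-both Ingresses (a quick check; the output should include
-`konghq.com/plugins: add-response-header`):
-
-```bash
-$ kubectl get ingress httpbin-app -n httpbin -o jsonpath='{.metadata.annotations}'
-$ kubectl get ingress echo-app -n echo -o jsonpath='{.metadata.annotations}'
-```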
- -Let's test it out: - -```bash -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:54:31 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:54:39 GMT -Server: echoserver -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As can be seen in the output, the `demo` header is injected by Kong when -the request matches the Ingress rules defined in our two Ingress rules. - -## Updating plugin configuration - -Now, let's update the plugin configuration to change the header value from -`injected-by-kong` to `injected-by-kong-for-kubernetes`: - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: add-response-header - annotations: - kubernetes.io/ingress.class: kong -config: - add: - headers: - - "demo: injected-by-kong-for-kubernetes" -plugin: response-transformer -' | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/add-response-header configured -``` - -If you repeat the requests from the last step, you will see Kong -now responds with updated header value. - -This guides demonstrates how plugin configuration can be shared across -services running in different namespaces. -This can prove to be useful if the persona controlling the plugin -configuration is different from service owners that are responsible for the -Service and Ingress resources in Kubernetes. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-kongingress-resource.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-kongingress-resource.md deleted file mode 100644 index de12faa0ea8f..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-kongingress-resource.md +++ /dev/null @@ -1,251 +0,0 @@ ---- -title: Using KongIngress resource ---- - -In this guide, we will learn how to use KongIngress resource to control -proxy behavior. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Install a dummy service - -We will start by installing the echo service. - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress - -Let's expose the echo service outside the Kubernetes cluster -by defining an Ingress. 
- -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test: - -```bash -$ curl -i $PROXY_IP/foo -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/foo - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/foo -``` - -## Use KongIngress with Ingress resource - -By default, Kong will proxy the entire path to the service. -This can be seen in the real path value in the above response. - -We can configure Kong to strip out the part of the path defined in the -Ingress rule and to only respond to GET requests for this particular rule. - -To modify these behaviours, let's first create a KongIngress resource -defining the new behaviour: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: sample-customization -route: - methods: - - GET - strip_path: true" | kubectl apply -f - -kongingress.configuration.konghq.com/test created -``` - -Now, let's associate this KongIngress resource with our Ingress resource -using the `konghq.com/override` annotation. - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/override":"sample-customization"}}}' -ingress.extensions/demo patched -``` - -Now, Kong will proxy only GET requests on `/foo` path and -strip away `/foo`: - -```bash -$ curl -s $PROXY_IP/foo -X POST -{"message":"no Route matched with those values"} - - -$ curl -s $PROXY_IP/foo/baz - - -Hostname: echo-d778ffcd8-vrrtw - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-vrrtw - pod namespace: default - pod IP: 10.60.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/baz - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/baz -``` - -As you can see, the real path value is `/baz`. - -## Use KongIngress with Service resource - -KongIngress can be used to change load-balancing, health-checking and other -proxy behaviours in Kong. - -Next, we are going to tweak two settings: - -- Configure Kong to hash the requests based on IP address of the client. -- Configure Kong to proxy all the request on `/foo` to `/bar`. - -Let's create a KongIngress resource with these settings: - -```bash -$ echo 'apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-customization -upstream: - hash_on: ip -proxy: - path: /bar/' | kubectl apply -f - -kongingress.configuration.konghq.com/demo-customization created -``` - -Now, let's associate this KongIngress resource to the echo service. 
- -```bash -$ kubectl patch service echo -p '{"metadata":{"annotations":{"konghq.com/override":"demo-customization"}}}' -service/echo patched -``` - -Let's test this now: - -```bash -$ curl $PROXY_IP/foo/baz -Hostname: echo-d778ffcd8-vrrtw - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-vrrtw - pod namespace: default - pod IP: 10.60.0.9 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.1.10 - method=GET - real path=/bar/baz - query= - request_version=1.1 - request_scheme=http - request_uri=http://35.233.170.67:8080/bar/baz - -<-- clipped --> -``` - -Real path received by the upstream service (echo) is now changed to `/bar/baz`. - -Also, now all the requests will be sent to the same upstream pod: - -```bash -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -$ curl -s $PROXY_IP/foo | grep "pod IP" - pod IP: 10.60.0.9 -``` - - -You can experiement with various load balancing and healthchecking settings -that KongIngress resource exposes to suit your specific use case. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-kongplugin-resource.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-kongplugin-resource.md deleted file mode 100644 index 02ce7664b7a4..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-kongplugin-resource.md +++ /dev/null @@ -1,469 +0,0 @@ ---- -title: Using KongPlugin resource ---- - -In this guide, we will learn how to use KongPlugin resource to configure -plugins in Kong to modify requests for a specific request path. -The guide will cover configuring a plugin for a specific service, a set of Ingress rules -and for a specific user of the API. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. - -## Testing connectivity to Kong - -This guide assumes that `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -If you've not done so, please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Installing sample services - -We will start by installing two services, -an echo service and an httpbin service. 
- -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Setup Ingress rules - -Let's expose these services outside the Kubernetes cluster -by defining Ingress rules. - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: httpbin - servicePort: 80 - - path: /bar - backend: - serviceName: echo - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Let's test these endpoints: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:38:00 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Transfer-Encoding: chunked -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:38:17 GMT -Server: echoserver -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - - - -Hostname: echo-d778ffcd8-n9bss - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-8pgh - pod name: echo-d778ffcd8-n9bss - pod namespace: default - pod IP: 10.60.0.4 -<-- clipped -- > -``` - -Let's add another Ingress resource which proxies requests to `/baz` to httpbin -service: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-2 - annotations: - konghq.com/strip-path: "true" - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /baz - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo-2 created -``` - -We will use this path later. - -## Configuring plugins on Ingress resource - -Next, we will configure two plugins on the Ingress resource. - -First, we will create a KongPlugin resource: - -```bash -$ echo ' -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: add-response-header -config: - add: - headers: - - "demo: injected-by-kong" -plugin: response-transformer -' | kubectl apply -f - -kongplugin.configuration.konghq.com/add-response-header created -``` - -Next, we will associate it with our Ingress rules: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/plugins":"add-response-header"}}}' -ingress.extensions/demo patched -``` - -Here, we are asking the {{site.kic_product_name}} to execute the response-transformer -plugin whenever a request matching the Ingress rule is processed. 
- -Let's test it out: - -```bash -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:54:31 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 21:54:39 GMT -Server: echoserver -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As can be seen in the output, the `demo` header is injected by Kong when -the request matches the Ingress rules defined in the `demo` Ingress resource. - -If we send a request to `/baz`, then we can see that the header is not injected -by Kong: - -```bash -$ curl -I $PROXY_IP/baz -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 21:56:20 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Here, we have successfully setup a plugin which is executed only when a -request matches a specific `Ingress` rule. - -## Configuring plugins on Service resource - -Next, we will see how we can configure Kong to execute plugins for requests -which are sent to a specific service. - -Let's add a `KongPlugin` resource for authentication on the httpbin service: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: httpbin-auth -plugin: key-auth -" | kubectl apply -f - - -kongplugin.configuration.konghq.com/httpbin-auth created -``` - -Next, we will associate this plugin to the httpbin service running in our -cluster: - -```bash -$ kubectl patch service httpbin -p '{"metadata":{"annotations":{"konghq.com/plugins":"httpbin-auth"}}}' -service/httpbin patched -``` - -Now, any request sent to the service will require authentication, -no matter which `Ingress` rule it matched: - -```bash -$ curl -I $PROXY_IP/baz -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 22:09:04 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -Server: kong/1.2.1 - -$ curl -I $PROXY_IP/foo -HTTP/1.1 401 Unauthorized -Date: Wed, 17 Jul 2019 22:12:13 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -WWW-Authenticate: Key realm="kong" -Content-Length: 41 -demo: injected-by-kong -Server: kong/1.2.1 -``` - -You can also see how the `demo` header was injected as the request also -matched one of the rules defined in the `demo` `Ingress` resource. - -## Configure consumer and credential - -Follow the [Using Consumers and Credentials](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource/) -guide to provision a user and an apikey. 
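-
-In short, the provisioning steps from that guide boil down to creating a
-credential Secret and a KongConsumer that references it (same names and
-API key as in that guide):
-
-```bash
-$ kubectl create secret generic harry-apikey \
-    --from-literal=kongCredType=key-auth \
-    --from-literal=key=my-sooper-secret-key
-
-$ echo "apiVersion: configuration.konghq.com/v1
-kind: KongConsumer
-metadata:
-  name: harry
-  annotations:
-    kubernetes.io/ingress.class: kong
-username: harry
-credentials:
-- harry-apikey" | kubectl apply -f -
-```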
-Once you have it, please continue: - -Use the API key to pass authentication: - -```bash -$ curl -I $PROXY_IP/baz -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:16:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:15:34 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 -``` - -## Configure a global plugin - -Now, we will protect our Kubernetes cluster. -For this, we will be configuring a rate-limiting plugin, which -will throttle requests coming from the same client. - -This must be a cluster-level `KongClusterPlugin` resource, as `KongPlugin` -resources cannot be applied globally, to preserve Kubernetes RBAC guarantees -for cross-namespace isolation. - -Let's create the `KongClusterPlugin` resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: global-rate-limit - annotations: - kubernetes.io/ingress.class: kong - labels: - global: \"true\" -config: - minute: 5 - limit_by: consumer - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongclusterplugin.configuration.konghq.com/global-rate-limit created -``` - -With this plugin (please note the `global` label), every request through -the {{site.kic_product_name}} will be rate-limited: - -```bash -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:34:10 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 22:34:14 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Configure a plugin for a specific consumer - -Now, let's say we would like to give a specific consumer a higher rate-limit. - -For this, we can create a `KongPlugin` resource and then associate it with -a specific consumer. 
- -First, create the `KongPlugin` resource: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: harry-rate-limit -config: - minute: 10 - limit_by: consumer - policy: local -plugin: rate-limiting -" | kubectl apply -f - -kongplugin.configuration.konghq.com/harry-rate-limit created -``` - -Next, associate this with the consumer: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong - konghq.com/plugins: harry-rate-limit -username: harry -credentials: -- harry-apikey" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry configured -``` - -Note the annotation being added to the `KongConsumer` resource. - -Now, if the request is made as the `harry` consumer, the client -will be rate-limited differently: - -```bash -$ curl -I $PROXY_IP/foo -H 'apikey: my-sooper-secret-key' -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 9593 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 22:34:10 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-RateLimit-Limit-minute: 10 -X-RateLimit-Remaining-minute: 9 -demo: injected-by-kong -X-Kong-Upstream-Latency: 3 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -# a regular unauthenticated request -$ curl -I $PROXY_IP/bar -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Wed, 17 Jul 2019 22:34:14 GMT -Server: echoserver -X-RateLimit-Limit-minute: 5 -X-RateLimit-Remaining-minute: 4 -demo: injected-by-kong -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -This guide demonstrates how you can use the {{site.kic_product_name}} to -impose restrictions and transformations -on various levels using Kubernetes style APIs. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-mtls-auth-plugin.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-mtls-auth-plugin.md deleted file mode 100644 index 99747dde32bd..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-mtls-auth-plugin.md +++ /dev/null @@ -1,321 +0,0 @@ ---- -title: Using mtls-auth plugin ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -verify client certificates using CA certificates and -[mtls-auth](https://docs.konghq.com/hub/kong-inc/mtls-auth/) plugin -for HTTPS requests. - -> Note: You need an Enterprise license to use this feature. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -Kong for Kubernetes Enterprise on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise) to configure -this environment variable. - -If everything is set up correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. 
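-
-If you have not exported `PROXY_IP` yet, one way to look it up, assuming the
-reference deployment with a LoadBalancer-type `kong-proxy` Service in the
-`kong` namespace, is sketched below. On some cloud providers the load
-balancer is exposed as a hostname rather than an IP address, in which case
-use `.hostname` instead of `.ip`:
-
-```bash
-$ export PROXY_IP=$(kubectl get service -n kong kong-proxy \
-    -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
-$ echo $PROXY_IP
-```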
-
-## Provision a CA certificate in Kong
-
-CA certificates in Kong are provisioned by creating a `Secret` resource in
-Kubernetes.
-
-The secret resource must have a few properties:
-- It must have the `konghq.com/ca-cert: "true"` label.
-- It must have a `cert` data property which contains a valid CA certificate
-  in PEM format.
-- It must have an `id` data property which contains a random UUID.
-- It must have a `kubernetes.io/ingress.class` annotation whose value matches
-  the value of the controller's `--ingress-class` argument. By default, that
-  value is "kong".
-
-Note that a self-signed CA certificate is being used for the purpose of this
-guide. You should use your own CA certificate that is backed by
-your PKI infrastructure.
-
-**The example here is only used to show the YAML format of a `Secret` resource for a CA certificate. DO NOT directly use the certificate here.
-You should use your own CA certificate, or generate a self-signed certificate for testing.**
-
-```bash
-$ echo "apiVersion: v1
-kind: Secret
-metadata:
-  name: my-ca-cert
-  annotations:
-    kubernetes.io/ingress.class: kong
-  labels:
-    konghq.com/ca-cert: 'true'
-type: Opaque
-stringData:
-  cert: |
-    -----BEGIN CERTIFICATE-----
-    MIICwTCCAamgAwIBAgIUHGUzUWvHJHrREvIZIcORiFUvze4wDQYJKoZIhvcNAQEL
-    BQAwEDEOMAwGA1UEAwwFSGVsbG8wHhcNMjAwNTA4MjExODA1WhcNMjAwNjA3MjEx
-    ODA1WjAQMQ4wDAYDVQQDDAVIZWxsbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-    AQoCggEBANCMMBngjuTvqts8ZXtZhqdr181QH/NmytW1KlyqZd6ppXUer+i0OWhP
-    1nAyHsBPJljKAFLd8l1EioPFkN78/wJFDJrHOtfniIQPVLdS2cnNQ72dLyQH6smH
-    JQDV8ePBQ2GdRP6s61+Da8eoaW6nSLtmEUhxvyteboqwmi2CtUtAfuiU1m5sOdpS
-    z+L4D08CE+SFIT4MGD3gxNdg7lccWCHIfk54VRSdGDKEVwed8OQvxD0TdpHY+ym5
-    nJ4JSkhiS9XIodnxR3AZ6rIPRqk+MQ4LGTjX2EbM0/Yg4qvnZ7m4fcpK2goDZIVL
-    EF8F+ka1RaAYWTsXI1BAkJbb3kdo/yUCAwEAAaMTMBEwDwYDVR0TBAgwBgEB/wIB
-    ADANBgkqhkiG9w0BAQsFAAOCAQEAVvB/PeVZpeQ7q2IQQQpADtTd8+22Ma3jNZQD
-    EkWGZEQLkRws4EJNCCIvkApzpx1GqRcLLL9lbV+iCSiIdlR5W9HtK07VZ318gpsG
-    aTMNrP9/2XWTBzdHWaeZKmRKB04H4z7V2Dl58D+wxjdqNWsMIHeqqPNKGamk/q8k
-    YFNqNwisRxMhU6qPOpOj5Swl2jLTuVMAeGWBWmPGU2MUoaJb8sc2Vix9KXcyDZIr
-    eidkzkqSrjNzI0yJ2gdCDRS4/Rw9iV3B3SRMs0mJMLBDrsowhNfLAd8I3NHzLwps
-    dZFcvZcT/p717K3hlFVdjGnKIgKcG7aYji/XRR87HKnc+cJMCw==
-    -----END CERTIFICATE-----
-  id: cce8c384-721f-4f58-85dd-50834e3e733a" | kubectl create -f -
-secret/my-ca-cert created
-```
-
-Take note of the ID. You can use this ID or a different one, but
-the ID is important in the next step when we create the plugin.
-Each CA certificate that you create needs a unique ID.
-Any random UUID will suffice here and it doesn't have any security
-implication.
-
-You can use [uuidgen](https://linux.die.net/man/1/uuidgen) (Linux, OS X) or
-[New-Guid](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/new-guid)
-(Windows) to generate an ID.
-
-For example:
-```bash
-$ uuidgen
-907821fc-cd09-4186-afb5-0b06530f2524
-```
-
-## Configure mtls-auth plugin
-
-Next, we are going to create an `mtls-auth` KongPlugin resource which references
-the CA certificate provisioned in the last step:
-
-```bash
-$ echo "
-apiVersion: configuration.konghq.com/v1
-kind: KongPlugin
-metadata:
-  name: mtls-auth
-config:
-  ca_certificates:
-  - cce8c384-721f-4f58-85dd-50834e3e733a
-  skip_consumer_lookup: true
-  revocation_check_mode: SKIP
-plugin: mtls-auth
-" | kubectl apply -f -
-kongplugin.configuration.konghq.com/mtls-auth created
-```
-
-## Install a dummy service
-
-Let's deploy an echo service which we wish to protect
-using TLS client certificate authentication.
- -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -You can deploy a different service or skip this step if you already -have a service deployed in Kubernetes. - -## Set up Ingress - -Let's expose the echo service outside the Kubernetes cluster -by defining an Ingress. - -```bash -$ echo " -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/plugins: mtls-auth - kubernetes.io/ingress.class: kong -spec: - rules: - - http: - paths: - - path: /foo - backend: - serviceName: echo - servicePort: 80 -" | kubectl apply -f - -ingress.extensions/demo created -``` - -## Test the endpoint - -Now, let's test to see if Kong is asking for client certificate -or not when we make the request: - -``` -$ curl -k https://$PROXY_IP -HTTP/2 401 -date: Mon, 11 May 2020 18:15:05 GMT -content-type: application/json; charset=utf-8 -content-length: 50 -x-kong-response-latency: 0 -server: kong/2.0.4.0-enterprise-k8s - -{"message":"No required TLS certificate was sent"} -``` - -As we can see, Kong is restricting the request because it doesn't -have the necessary authentication information. - -Two things to note here: -- `-k` is used because Kong is set up to serve a self-signed certificate - by default. For full mutual authentication in production use cases, - you must configure Kong to serve a certificate that is signed by a trusted CA. -- For some deployments `$PROXY_IP` might contain a port that points to - `http` port of Kong. In others, it might happen that it contains a DNS name - instead of an IP address. If needed, please update the - command to send an `https` request to the `https` port of Kong or - the load balancer in front of it. - - -## Provisioning credential - -Next, in order to authenticate against Kong, create the client -certificate and private key with the following content: - -{:.important} ->This example is only used to show the format of the client certificate and private key. **DO NOT** use the certificate and private key directly. - -You should use certificate and private key signed by your own CA. This is how you generate a self-signed CA certificate. 
- -```bash -openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes\ - -subj "/C=US/ST=California/L=San Francisco/O=Kong/OU=Org/CN=www.example.com" -``` - -```bash -$ cat client.crt ------BEGIN CERTIFICATE----- -MIIEFTCCAv0CAWUwDQYJKoZIhvcNAQELBQAwEDEOMAwGA1UEAwwFSGVsbG8wHhcN -MjAwNTA4MjE0OTE1WhcNMjEwNTA4MjE0OTE1WjCBkDELMAkGA1UEBhMCQVUxEzAR -BgNVBAgMClNvbWUtU3RhdGUxDTALBgNVBAcMBHNvbWUxETAPBgNVBAoMCHNvbWUg -b3JnMRAwDgYDVQQLDAdvcmd1bml0MRswGQYDVQQDDBJleGFtcGxlLmtvbmdocS5j -b20xGzAZBgkqhkiG9w0BCQEWDGZvb0Bzb21lLmNvbTCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBAM/y80ppzwGYS7zl+A6fx4Xkjwja+ZUK/AoBDazS3TkR -W1tDFZ71koLd60qK2W1d9Wh0/F3iNTcobVefr02mEcLtl+d4zUug+W7RsK/8JSCM -MIDVDYzlTWdd7RJzV1c/0NFZyTRkEVSjGn6eQoC/1aviftiNyfqWtuIDQ5ctSBt8 -2fyvDwu/tBR5VyKu7CLnjZ/ffjNT8WDfbO704XeBBId0+L8i8J7ddYlRhZufdjEw -hKx2Su8PZ9RnJYShTBOpD0xdveh16eb7dpCZiPnp1/MOCyIyo1Iwu570VoMde9SW -sPFLdUMiCXw+A4Gp/e9Am+D/98PiL4JChKsiowbzpDfMrVQH4Sblpcgn/Pp+u1be -2Kl/7wqr3TA+w/unLnBnB859v3wDhSW4hhKASoFwyX3VfJ43AkmWFUBX/bpDvHto -rFw+MvbSLsS3QD5KlZmega1pNZtin5KV8H/oJI/CjEc9HHwd27alW9VkUu0WrH0j -c98wLHB/9xXLjunabxSmd+wv25SgYNqpsRNOLgcJraJbaRh4XkbDyuvjF2bRJVP4 -pIjntxQHS/oDFFFK3wc7fp/rTAl0PJ7tytYj4urg45N3ts7unwnB8WmKzD9Avcwe -8Kst12cEibS8X2sg8wOqgB0yarC17mBEqONK7Fw4VH+VzZYw0KGF5DWjeSXj/XsD -AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEvTMHe27npmyJUBxQeHcNFniMJUWZf0 -i9EGd+XlF+m/l3rh1/mCecV7s32QTZEiFHv4UJPYASbgtx7+mEZuq7dVsxIUICWs -gyRkwvKjMqK2tR5IRkquhK5PuDS0QC3M/ZsDwnTgaezFrplFYf80z1kAAkm/c7eh -ZEjI6+1vuaS+HX1w2unk42PiAEB6oKFi3b8xl4TC6acYfMYiC3cOa/d3ZKHhqXhT -wM0VtDe0Qn1kExe+19XJG5cROelxmMXBm1+/c2KUw1yK8up6kJlEsmd8JLw/wMUp -xcJUKIH1qGBlRlFTYbVell+dB7IkHhadrnw27Z47uHobB/lzN69r63c= ------END CERTIFICATE----- -``` - -```bash -$ cat client.pem ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAz/LzSmnPAZhLvOX4Dp/HheSPCNr5lQr8CgENrNLdORFbW0MV -nvWSgt3rSorZbV31aHT8XeI1NyhtV5+vTaYRwu2X53jNS6D5btGwr/wlIIwwgNUN -jOVNZ13tEnNXVz/Q0VnJNGQRVKMafp5CgL/Vq+J+2I3J+pa24gNDly1IG3zZ/K8P -C7+0FHlXIq7sIueNn99+M1PxYN9s7vThd4EEh3T4vyLwnt11iVGFm592MTCErHZK -7w9n1GclhKFME6kPTF296HXp5vt2kJmI+enX8w4LIjKjUjC7nvRWgx171Jaw8Ut1 -QyIJfD4Dgan970Cb4P/3w+IvgkKEqyKjBvOkN8ytVAfhJuWlyCf8+n67Vt7YqX/v -CqvdMD7D+6cucGcHzn2/fAOFJbiGEoBKgXDJfdV8njcCSZYVQFf9ukO8e2isXD4y -9tIuxLdAPkqVmZ6BrWk1m2KfkpXwf+gkj8KMRz0cfB3btqVb1WRS7RasfSNz3zAs -cH/3FcuO6dpvFKZ37C/blKBg2qmxE04uBwmtoltpGHheRsPK6+MXZtElU/ikiOe3 -FAdL+gMUUUrfBzt+n+tMCXQ8nu3K1iPi6uDjk3e2zu6fCcHxaYrMP0C9zB7wqy3X -ZwSJtLxfayDzA6qAHTJqsLXuYESo40rsXDhUf5XNljDQoYXkNaN5JeP9ewMCAwEA -AQKCAgAt5cC/HuV6w6OL2PJMQAXozo6ndLV7qQYCK0Nabtw3GVahqQffovIoglTJ -iiX9Vqyw1olRK3l1aC3iFjO6Hrpy3MAXbflaBPio9g1aenuzWF3oQZ4RCBdyhi+q -T9zqPAKaAog/UQrmNG3HnqStCCpgGsjGgV0gOx24euHzPyJYNtFiTT0z6acUkcei -txsVhSgkLk8Lgy6WpBnGEDSnjMl0IGQ6w7p6RgUIPv8PXz3WE5BlBGL7qtnO7slA -Id3JxRnEUDh3U3dB7SS5O7oY9v0b/3CDpsuXe3wd1G432E0Zmf0J9Q63t46CZqmd -d+i9YvRE0BpemNDFnmrr3uQ+x43qARtonEELirY99aW0hUUfD7PieLNnZP7tueVB -J80GUU5ckJhn9u6SlKZtvBU2mAWvaKZEv1+9vDh4Le8fNtubpC5YtSKztc66csL6 -DLtyi81iftpF2YtDVKK8UB35KyG/0IWkXyfquOkYuL8RwrJR9tNL1+Zs4GqgC5sH -fVIwR6/+w/kpeH9nP8/0VaXRoqCjKQpVjFg9f645rZQ/OzcnQNv6k8Sr+4zHaHog -uFwOo7p4QfIPIBfU8+8RD36C5U/p5PiouR8sN+rfDCu0N07XKmHAphlqvjTR+OG/ -J5o3jGgAerMZn3gmiGUS+IdmrPw7we8gc8j8E8C6TjvlALQNOQKCAQEA6ySvPyMw -hiqfa9TeYce1gI2HtRyiCM1r7otFmTqS/I53he7b9LAZ5+gqXxMS/PW9QTvKET2q -vRU+xZYD4h/i9R+qZT3s7EsNBXBQHkvh0m0qNRtrsSgAYCWLsI/0nUOKIz6obHu5 -5SxS8y3y1t9SoXvWpzTpAnuk91BVMtSephf/4/hXlH2d1WnOC0SqS979aRrm8NE/ -rdT5qchhySyfZkYbADxy5AHHqoFTtkxGnLvcbY0X/oJI3zNYCFKTFNmb6/61cxuB 
-REjwREUFOhneXYb9mBG4bxuteCz65MyshiN1EAsVMnI6aEuHR6EAvt1Jslv7Qi1a -2UKM61XcL8m/lQKCAQEA4mTGaoZJ1yz+TCKMuae33Y9assXOYAQpdb3MY2UTgzQg -JAZYmwaAsBaC1e49G0eYVAP+eDI4u0OR0f0CW9Pf+OdGRWuZfVum0d+PmcIhJfgM -jXsR4CJpPcX7VZLHMZ77QFDh/xBHNXR8F1latPXFYR3ytcXxl4HEtodDpS84AgiO -57yPitY78MS16l3GJGWlgDdRP/LvVixugH2steHCtk8l932/qayUeezbYSEhyQ6L -13f0qRaBhvRsoULj3HvQWNPxmGYK3p+N+zXc1CErF6x8sDq4jeXyNg+26gZknea8 -3SEKKx+Wf4vT3rlUEeYy0uFubG06qYCdtj2ZuSOKNwKCAQEAgJpQqkRRrp8zD6Od -eHbkIonFVd1yFURcKlvLVdF+QFiesAaCD+IcWQRV4Cerc+PmfP35NtK2RbGP4jp4 -pzxvQUbvR23F3Tnyxi2188vmltKTifYUQRCym+MM8iTZUQV2UG5daO+GLPu/5jYU -IUaEh8MWE97RLUV4ZLZv0lwM5KQtlH3nUFQfdW/ne6wzQ0mS6OAIvF6E6EqZvSzV -plQcXbAr5kYpQ+BhVjRjF0nCOMhZ9yR6ofyZZFFNbUfUH0wghcKJdInveew2U/A3 -up4ZSqegnIHckA/gIODg2y/Bj59mz75v+mYU4aOlOhypLroSK1U5JultTXOjZLZR -tWUuvQKCAQAVcti9hOWABlo9WlSczkAENK2yHD12KU7IQegYTp4vowOchiFk5pPQ -mwFgRUbyy7Cp3QsB1jg7vaYWD/NmQceJbFfjAdOz5bgDUDvppFPBpiOCT/OcmYYA -/T3XmKVYlShWqpMOuDsW3GdZSvTmChbeIZk6EXvXD8tUQ7Jr9vJGdwsa92leDPf2 -0pwtjR7Vme+5GwSOm3SDZIg/kiiHvtDUtuDw9q/u4lPazU7nf90UkFU9X7cFQgWZ -hJS6Hn06CVzu3X2ZI6nJ97Ha5/p4+n97qbLSe226u9tbtddtipeDwjWIebXd6gs3 -IEc9Za+KVpXgFs2AZkTVhELs3h8vRCe3AoIBAQDRr0k5OePCsDbs6RadGI9Ta+pf -I30u8imKw8Rih++127UPjpc8OCzaQNvWnpdAoJTgo12fQJqGigRUfJMFFQn7u3jz -ggAq9WLRsXRZpEXk8NXDr/WhksOoWmkxLf4uNO7l2AytIFqZbb1pmTd0g+np2yBE -8VgDR45IxbGPQLsTzKXeXJuXOi7ut2ehJ+VgsS84BsRTeO4v+Y2qpGcyw6fXtU3E -NDrWe/C5QceILtDcd+JiXUgKrHRK+qrfawoxPBDVhYJ+N/Y7SqvZ2GvxibnRs8YA -cbhEebkfUHRQSEqkPr+ndRHInwWTMAWF4IhSuQOpTvT7PY7UNet2io8W8Py6 ------END RSA PRIVATE KEY----- -``` - -Now, use the key and certificate to authenticate against Kong and use the -service: - -```bash -$ curl --key client.key --cert client.crt https://$PROXY_IP/foo -k -I -HTTP/2 200 -content-type: text/plain; charset=UTF-8 -date: Mon, 11 May 2020 18:27:22 GMT -server: echoserver -x-kong-upstream-latency: 1 -x-kong-proxy-latency: 1 -via: kong/2.0.4.0-enterprise-k8s -``` - -## Conclusion - -This guide demonstrates how to implement client TLS authentication -using Kong. -You are free to use other features that mtls-auth plugin in Kong to -achieve more complicated use-cases. diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-oidc-plugin.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-oidc-plugin.md deleted file mode 100644 index 532a1ea200ce..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-oidc-plugin.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Using OIDC plugin ---- - -{{site.ee_product_name}}'s OIDC plugin can authenticate requests using OpenID Connect protocol. -This guide shows a basic example of how to setup the OIDC plugin using -the Ingress Controller. - -> Note: This works only with Enterprise version of Kong. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) documentation -to install enterprise version of the {{site.kic_product_name}}. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. 
- -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: 192.0.2.8.xip.io - http: - paths: - - path: / - backend: - serviceName: httpbin - servicePort: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -We are using `192.0.2.8.xip.io` as our host, you can use any domain name -of your choice. A domain name is a prerequisite for this guide. -For demo purpose, we are using [xip.io](http://xip.io) -service to avoid setting up a DNS record. - -Test the Ingress rule: - -```bash -$ curl -i 192.0.2.8.xip.io/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Next, open a browser and browse to `http://192.0.2.8.xip.io`. -You should see landing page same as httpbin.org. - -## Setup OIDC plugin - -Now we are going to protect our dummy service with OpenID Connect -protocol using Google as our identity provider. - -First, set up an OAuth 2.0 application in -[Google](https://developers.google.com/identity/protocols/oauth2/openid-connect). - -Once you have setup your application in Google, use the client ID and client -secret and create a KongPlugin resource in Kubernetes: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: oidc-auth -config: - issuer: https://accounts.google.com/.well-known/openid-configuration - client_id: - - - client_secret: - - - redirect_uri: - - http://192.0.2.8.xip.io -plugin: openid-connect -" | kubectl apply -f - -kongplugin.configuration.konghq.com/oidc-auth created -``` - -The `redirect_uri` parameter must be a URI that matches the Ingress rule we -created earlier. You must also [add it to your Google OIDC -configuration](https://developers.google.com/identity/protocols/oauth2/openid-connect#setredirecturi) - -Next, enable the plugin on our Ingress: - -```bash -$ kubectl patch ing demo -p '{"metadata":{"annotations":{"konghq.com/plugins":"oidc-auth"}}}' -ingress.extensions/demo patched -``` -## Test - -Now, if you visit the host you have set up in your Ingress resource, -Kong should redirect you to Google to verify your identity. -Once you identify yourself, you should be able to browse our dummy service -once again. - -This basic configuration permits any user with a valid Google account to access -the dummy service. -For setting up more complicated authentication and authorization flows, -please read -[plugin docs](/gateway/latest/configure/auth/oidc-google/). 
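-
-If the redirect to Google does not happen as expected, a quick sanity check
-is to confirm that the plugin resource exists and that the annotation landed
-on the Ingress. A sketch, using the resource names from this guide:
-
-```bash
-$ kubectl get kongplugin oidc-auth
-$ kubectl get ingress demo -o jsonpath='{.metadata.annotations}'
-```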
diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-rewrites.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-rewrites.md deleted file mode 100644 index cdba8d4bb91f..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-rewrites.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Rewriting hosts and paths ---- -This guide demonstrates host and path rewrites using Ingress and Service configuration. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Create a test Deployment - -To test our requests, we create an echo server Deployment, which responds to -HTTP requests with a summary of the request contents: - -```bash -$ kubectl create namespace echo -namespace/echo created -$ kubectl apply -n echo -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -After completing the examples in the guide, you can clean up the example -configuration with `kubectl delete namespace echo`. - -For your actual production configuration, replace `echo` with whatever -namespace you use to run your application. - -## Create a Kubernetes service - -First, create a Kubernetes Service: - -```bash -echo " -apiVersion: v1 -kind: Service -metadata: - name: echo - namespace: echo -spec: - selector: - app: echo - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 80 -" | kubectl apply -f - -``` - -When referenced by an Ingress, this Service will create a Kong service and -upstream that uses the upstream IPs (Pod IPs) for its `Host` header and appends -request paths starting at `/`. - -## Create an Ingress to expose the service at the path `/myapp` on `example.com` - -```bash -echo ' -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: my-app - namespace: echo - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: myapp.example.com - http: - paths: - - path: /myapp - backend: - serviceName: echo - servicePort: 80 -' | kubectl create -f - -``` - -This Ingress will create a Kong route attached to the service we created above. -It will preserve its path but honor the service's hostname, so this request: - -```bash -$ curl -svX GET http://myapp.example.com/myapp/foo --resolve myapp.example.com:80:$PROXY_IP -GET /myapp/foo HTTP/1.1 -Host: myapp.example.com -User-Agent: curl/7.70.0 -Accept: */* -``` -will appear upstream as: - -``` -GET /myapp/foo HTTP/1.1 -Host: 10.16.4.8 -User-Agent: curl/7.70.0 -Accept: */* -``` - -We'll use this same cURL command in other examples as well. - -Actual output from cURL and the echo server will be more verbose. These -examples are condensed to focus primarily on the path and Host header. 
- -Note that this default behavior uses `strip_path=false` on the route. This -differs from Kong's standard default to conform with expected ingress -controller behavior. - -## Rewriting the host - -There are two options to override the default `Host` header behavior: - -- Add the [`konghq.com/host-header` annotation][1] to your Service, which sets - the `Host` header directly: - ```bash - $ kubectl patch -n echo service echo -p '{"metadata":{"annotations":{"konghq.com/host-header":"internal.myapp.example.com"}}}' - ``` - The request upstream will now use the header from that annotation: - ``` - GET /myapp/foo HTTP/1.1 - Host: internal.myapp.example.com - User-Agent: curl/7.70.0 - Accept: */* - ``` -- Add the [`konghq.com/preserve-host` annotation][0] to your Ingress, which - sends the route/Ingress hostname: - ```bash - $ kubectl patch -n echo ingress my-app -p '{"metadata":{"annotations":{"konghq.com/preserve-host":"true"}}}' - ``` - The request upstream will now include the hostname from the Ingress rule: - ``` - GET /myapp/foo HTTP/1.1 - Host: myapp.example.com - User-Agent: curl/7.70.0 - Accept: */* - ``` - -The `preserve-host` annotation takes precedence, so if you add both annotations -above, the upstream host header will be `myapp.example.com`. - -## Rewriting the path - -There are two options to rewrite the default path handling behavior: - -- Add the [`konghq.com/strip-path` annotation][2] to your Ingress, which strips - the path component of the route/Ingress, leaving the remainder of the path at - the root: - ```bash - $ kubectl patch -n echo ingress my-app -p '{"metadata":{"annotations":{"konghq.com/strip-path":"true"}}}' - ``` - The request upstream will now only contain the path components not in the - Ingress rule: - ``` - GET /foo HTTP/1.1 - Host: 10.16.4.8 - User-Agent: curl/7.70.0 - Accept: */* - ``` -- Add the [`konghq.com/path` annotation][3] to your Service, which prepends - that value to the upstream path: - ```bash - $ kubectl patch -n echo service echo -p '{"metadata":{"annotations":{"konghq.com/path":"/api"}}}' - ``` - The request upstream will now contain a leading `/api`: - ``` - GET /api/myapp/foo HTTP/1.1 - Host: 10.16.4.8 - User-Agent: curl/7.70.0 - Accept: */* - ``` -`strip-path` and `path` can be combined together, with the `path` component -coming first. Adding both annotations above will send requests for `/api/foo`. - -[0]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcompreserve-host -[1]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcomhost-header -[2]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcomstrip-path -[3]: /kubernetes-ingress-controller/{{page.release}}/references/annotations/#konghqcompath diff --git a/app/kubernetes-ingress-controller/1.3.x/guides/using-tcpingress.md b/app/kubernetes-ingress-controller/1.3.x/guides/using-tcpingress.md deleted file mode 100644 index dd759e971f62..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/guides/using-tcpingress.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: TCPIngress with Kong ---- - -This guide walks through using TCPIngress Custom Resource -resource to expose TCP-based services running in Kubernetes to the out -side world. - -## Overview - -TCP-based Ingress means that Kong simply forwards the TCP stream to a Pod -of a Service that's running inside Kubernetes. Kong will not perform any -sort of transformations. 
- -There are two modes avaialble: -- **Port based routing**: In this mode, Kong simply proxies all traffic it - receives on a specific port to the Kubernetes Service. TCP connections are - load balanced across all the available pods of the Service. -- **SNI based routing**: In this mode, Kong accepts a TLS-encrypted stream - at the specified port and can route traffic to different services based on - the `SNI` present in the TLS handshake. Kong will also terminate the TLS - handshake and forward the TCP stream to the Kubernetes Service. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -> **Note**: This feature works with Kong versions 2.0.4 and above. - -> **Note**: This feature is available in Controller versions 0.8 and above. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Configure Kong for new ports - -First, we will configure Kong's Deployment and Service to expose two new ports -9000 and 9443. Port 9443 expects a TLS connection from the client. - -```shell -$ kubectl patch deploy -n kong ingress-kong --patch '{ - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "proxy", - "env": [ - { - "name": "KONG_STREAM_LISTEN", - "value": "0.0.0.0:9000, 0.0.0.0:9443 ssl" - } - ], - "ports": [ - { - "containerPort": 9000, - "name": "stream9000", - "protocol": "TCP" - }, - { - "containerPort": 9443, - "name": "stream9443", - "protocol": "TCP" - } - ] - } - ] - } - } - } -}' -deployment.extensions/ingress-kong patched -``` - -```shell -$ kubectl patch service -n kong kong-proxy --patch '{ - "spec": { - "ports": [ - { - "name": "stream9000", - "port": 9000, - "protocol": "TCP", - "targetPort": 9000 - }, - { - "name": "stream9443", - "port": 9443, - "protocol": "TCP", - "targetPort": 9443 - } - ] - } -}' -service/kong-proxy patched -``` - -You are free to choose other ports as well. - -## Install TCP echo service - -Next, we will install a dummy TCP service. -If you already have a TCP-based service running in your cluster, -you can use that as well. - -```shell -$ kubectl apply -f https://bit.ly/tcp-echo -deployment.apps/tcp-echo created -service/tcp-echo created -``` - -Now, we have a TCP echo service running in Kubernetes. -We will now expose this on plain-text and a TLS based port. 
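-
-Before moving on, you can optionally confirm that the two ports added by the
-patches above are now exposed on the `kong-proxy` Service. A quick check
-(the exact output depends on your Service type and provider):
-
-```shell
-$ kubectl get service -n kong kong-proxy
-```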
- -## TCP port based routing - -To expose our service to the outside world, create the following -`TCPIngress` resource: - -```shell -$ echo "apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: echo-plaintext - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - port: 9000 - backend: - serviceName: tcp-echo - servicePort: 2701 -" | kubectl apply -f - -tcpingress.configuration.konghq.com/echo-plaintext created -``` - -Here we are instructing Kong to forward all traffic it receives on port -9000 to `tcp-echo` service on port 2701. - -Once created, we can see the IP address at which this is available: - -```shell -$ kubectl get tcpingress -NAME ADDRESS AGE -echo-plaintext 3m18s -``` - -Lets connect to this service using `telnet`: - -```shell -$ telnet $PROXY_IP 9000 -Trying 35.247.39.83... -Connected to 35.247.39.83. -Escape character is '^]'. -Welcome, you are connected to node gke-harry-k8s-dev-pool-1-e9ebab5e-c4gw. -Running on Pod tcp-echo-844545646c-gvmkd. -In namespace default. -With IP address 10.60.1.17. -This text will be echoed back. -This text will be echoed back. -^] -telnet> Connection closed. -``` - -We can see here that the `tcp-echo` service is now available outside the -Kubernetes cluster via Kong. - -## TLS SNI based routing - -Next, we will demonstrate how Kong can help expose the `tcp-echo` service -in a secure manner to the outside world. - -Create the following TCPIngress resource: - -``` -$ echo "apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: echo-tls - annotations: - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - port: 9443 - backend: - serviceName: tcp-echo - servicePort: 2701 -" | kubectl apply -f - -tcpingress.configuration.konghq.com/echo-tls created -``` - -Now, we can access the `tcp-echo` service on port 9443, on SNI `example.com`. - -You should setup a DNS record for a Domain that you control -to point to PROXY_IP and then access -the service via that for production usage. - -In our contrived demo example, we can connect to the service via TLS -using `openssl`'s `s_client` command: - -```shell -$ openssl s_client -connect $PROXY_IP:9443 -servername example.com -quiet -openssl s_client -connect 35.247.39.83:9443 -servername foo.com -quiet -depth=0 C = US, ST = California, L = San Francisco, O = Kong, OU = IT Department, CN = localhost -verify error:num=18:self signed certificate -verify return:1 -depth=0 C = US, ST = California, L = San Francisco, O = Kong, OU = IT Department, CN = localhost -verify return:1 -Welcome, you are connected to node gke-harry-k8s-dev-pool-1-e9ebab5e-c4gw. -Running on Pod tcp-echo-844545646c-gvmkd. -In namespace default. -With IP address 10.60.1.17. -This text will be echoed back. -This text will be echoed back. -^C -``` - -Since Kong is not configured with a TLS cert-key pair for `example.com`, Kong -is returning a self-signed default certificate, which is not trusted. -You can also see that the echo service is running as expected. - -## Bonus - -Scale the `tcp-echo` Deployment to have multiple replicas and observe how -Kong load-balances the TCP-connections between pods. - -## Conclusion - -In this guide, we see how to use Kong's TCP routing capabilities using -TCPIngress Custom Resource. This can be very useful if you have services -running inside Kubernetes that have custom protocols instead of the more -popular HTTP or gRPC protocols. 
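-
-One follow-up to the SNI example above: if you want Kong to present a real
-certificate for `example.com` instead of its self-signed default, you can
-reference a Kubernetes TLS Secret from the TCPIngress `tls` section, much
-like the `tls` block of a standard Ingress. A sketch, assuming you already
-have a certificate and key for `example.com` on disk (the Secret name here
-is arbitrary):
-
-```shell
-$ kubectl create secret tls echo-example-tls \
-    --cert=./example.com.crt --key=./example.com.key
-
-$ echo "apiVersion: configuration.konghq.com/v1beta1
-kind: TCPIngress
-metadata:
-  name: echo-tls
-  annotations:
-    kubernetes.io/ingress.class: kong
-spec:
-  tls:
-  - hosts:
-    - example.com
-    secretName: echo-example-tls
-  rules:
-  - host: example.com
-    port: 9443
-    backend:
-      serviceName: tcp-echo
-      servicePort: 2701
-" | kubectl apply -f -
-```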
diff --git a/app/kubernetes-ingress-controller/1.3.x/index.md b/app/kubernetes-ingress-controller/1.3.x/index.md deleted file mode 100644 index e7c96ce953df..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/index.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Kong Ingress Controller -subtitle: An ingress controller for the {{site.base_gateway}} ---- - -## Concepts - -### Architecture - -The [design document][design] explains how the {{site.kic_product_name}} works -inside a Kubernetes cluster and configures Kong to proxy traffic as per -rules defined in the Ingress resources. - -### Custom Resources - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, the `KongIngress` Custom resource is used as an -"extension" to the existing Ingress API. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -Refer to the [custom resources concept document][crd] for more details. - -### Deployment Methods - -The {{site.kic_product_name}} can be deployed in a variety of deployment patterns. -Refer to the [deployment documentation](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/), -which explains all the components -involved and different ways of deploying them based on the use-case. - -### High-availability and Scaling - -The {{site.kic_product_name}} is designed to scale with your traffic -and infrastructure. -Refer to the [High-availability and Scaling guide](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/) to understand -failures scenarios, recovery methods, as well as scaling considerations. - -### Ingress classes - -[Ingress classes](/kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes) filter which resources the -controller loads. They ensure that {{site.kic_product_name}} instances do not -load configuration intended for other instances or other ingress controllers. - -### Security - -Refer to the [Security concepts guide](/kubernetes-ingress-controller/{{page.release}}/concepts/security/) to understand the -default security settings and how to further secure the Ingress Controller. - -## Guides and Tutorials - -Browse through the [how-to guides][guides] to get started or understand how to configure -a specific setting with the {{site.kic_product_name}}. - -## Configuration Reference - -The configurations in the {{site.kic_product_name}} can be tweaked using -Custom Resources and annotations. -Please refer to the following documents detailing this process: - -- [Custom Resource Definitions](/kubernetes-ingress-controller/{{page.release}}/references/custom-resources/) -- [Annotations](/kubernetes-ingress-controller/{{page.release}}/references/annotations/) -- [CLI arguments](/kubernetes-ingress-controller/{{page.release}}/references/cli-arguments/) -- [Version compatibility matrix](/kubernetes-ingress-controller/{{page.release}}/references/version-compatibility/) -- [Plugin compatibility matrix](/kubernetes-ingress-controller/{{page.release}}/references/plugin-compatibility/) - -## FAQs - -[FAQs][faqs] will help find answers to common problems quickly. -Please feel free to open Pull Requests to contribute to the list. 
- -## Troubleshooting - -Please read through our [deployment guide][deployment] for a detailed -understanding of how Ingress Controller is designed and deployed -along alongside Kong. - -- [FAQs][faqs] might help as well. -- [Troubleshooting guide][troubleshooting] can help - resolve some issues. - Please contribute back if you feel your experience can help - the larger community. - -[annotations]: /kubernetes-ingress-controller/{{page.release}}/references/annotations -[crd]: /kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources -[deployment]: /kubernetes-ingress-controller/{{page.release}}/deployment/overview -[design]: /kubernetes-ingress-controller/{{page.release}}/concepts/design -[faqs]: /kubernetes-ingress-controller/{{page.release}}/faq -[troubleshooting]: /kubernetes-ingress-controller/{{page.release}}/troubleshooting -[guides]: /kubernetes-ingress-controller/{{page.release}}/guides/overview diff --git a/app/kubernetes-ingress-controller/1.3.x/references/annotations.md b/app/kubernetes-ingress-controller/1.3.x/references/annotations.md deleted file mode 100644 index acd73cdeee86..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/references/annotations.md +++ /dev/null @@ -1,529 +0,0 @@ ---- -title: Kong Ingress Controller annotations ---- - -The {{site.kic_product_name}} supports the following annotations on various -resources: - -## Ingress resource - -Following annotations are supported on Ingress resources: - -| Annotation name | Description | -|-----------------|-------------| -| REQUIRED [`kubernetes.io/ingress.class`](#kubernetesioingressclass) | Restrict the Ingress rules that Kong should satisfy | -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for specific Ingress. | -| [`konghq.com/protocols`](#konghqcomprotocols) | Set protocols to handle for each Ingress resource. | -| [`konghq.com/preserve-host`](#konghqcompreserve-host) | Pass the `host` header as is to the upstream service. | -| [`konghq.com/strip-path`](#konghqcomstrip-path) | Strip the path defined in Ingress resource and then forward the request to the upstream service. | -| [`konghq.com/https-redirect-status-code`](#konghqcomhttps-redirect-status-code) | Set the HTTPS redirect status code to use when an HTTP request is received. | -| [`konghq.com/regex-priority`](#konghqcomregex-priority) | Set the route's regex priority. | -| [`konghq.com/methods`](#konghqcommethods) | Set methods matched by this Ingress. | -| [`konghq.com/snis`](#konghqcomsnis) | Set SNI criteria for routes created from this Ingress. | -| [`konghq.com/request-buffering`](#konghqcomrequest-buffering) | Set request buffering on routes created from this Ingress. | -| [`konghq.com/response-buffering`](#konghqcomresponse-buffering) | Set response buffering on routes created from this Ingress. | -| [`konghq.com/host-aliases`](#konghqcomhostaliases) | Additional hosts for routes created from this Ingress's rules. | -| [`konghq.com/override`](#konghqcomoverride) | Control other routing attributes via `KongIngress` resource. | - -`kubernetes.io/ingress.class` is normally required, and its value should match -the value of the `--ingress-class` controller argument ("kong" by default). - -Setting the `--process-classless-ingress-v1beta1` controller flag removes that requirement: -when enabled, the controller will process Ingresses with no -`kubernetes.io/ingress.class` annotation. 
Recommended best practice is to set -the annotation and leave this flag disabled; the flag is intended for -older configurations, as controller versions prior to 0.10 processed classless -Ingress resources by default. - -## Service resource - -Following annotations are supported on Service resources: - -| Annotation name | Description | -|-----------------|-------------| -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific Service | -| [`konghq.com/protocol`](#konghqcomprotocol) | Set protocol Kong should use to talk to a Kubernetes service | -| [`konghq.com/path`](#konghqcompath) | HTTP Path that is always prepended to each request that is forwarded to a Kubernetes service | -| [`konghq.com/client-cert`](#konghqcomclient-cert) | Client certificate and key pair Kong should use to authenticate itself to a specific Kubernetes service | -| [`konghq.com/host-header`](#konghqcomhost-header) | Set the value sent in the `Host` header when proxying requests upstream | -| [`konghq.com/override`](#konghqcomoverride) | Fine grained routing and load-balancing | -| [`ingress.kubernetes.io/service-upstream`](#ingresskubernetesioservice-upstream) | Offload load-balancing to kube-proxy or sidecar | - -## KongConsumer resource - -Following annotations are supported on KongConsumer resources: - -| Annotation name | Description | -|-----------------|-------------| -| REQUIRED [`kubernetes.io/ingress.class`](#kubernetesioingressclass) | Restrict the KongConsumers that a controller should satisfy | -| [`konghq.com/plugins`](#konghqcomplugins) | Run plugins for a specific consumer | - -`kubernetes.io/ingress.class` is normally required, and its value should match -the value of the `--ingress-class` controller argument ("kong" by default). - -Setting the `--process-classless-kong-consumer` controller flag removes that requirement: -when enabled, the controller will process KongConsumers with no -`kubernetes.io/ingress.class` annotation. Recommended best practice is to set -the annotation and leave this flag disabled; the flag is primarily intended for -older configurations, as controller versions prior to 0.10 processed classless -KongConsumer resources by default. - -## Annotations - -### kubernetes.io/ingress.class - -If you have multiple Ingress controllers in a single cluster, -you can pick one by specifying the `ingress.class` annotation. -Following is an example of -creating an Ingress with an annotation: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: test-1 - annotations: - kubernetes.io/ingress.class: "gce" -spec: - rules: - - host: example.com - http: - paths: - - path: /test1 - backend: - serviceName: echo - servicePort: 80 -``` - -This will target the GCE controller, forcing the {{site.kic_product_name}} to -ignore it. - -On the other hand, an annotation such as - -```yaml -metadata: - name: test-1 - annotations: - kubernetes.io/ingress.class: "kong" -``` - -will target the {{site.kic_product_name}}, forcing the GCE controller -to ignore it. 
-
-The following resources _require_ this annotation by default:
-
-- Ingress
-- KongConsumer
-- TCPIngress
-- KongClusterPlugin
-- Secret resources with the `ca-cert` label
-
-You can optionally allow Ingress or KongConsumer resources with no class
-annotation (by setting the `--process-classless-ingress-v1beta1` or
-`--process-classless-kong-consumer` flags, respectively), though recommended
-best practice is to leave these flags disabled: the flags are primarily
-intended for compatibility with configuration created before this requirement
-was introduced in controller 0.10.
-
-If you allow classless resources, you must take care when using multiple
-controller instances in a single cluster: only one controller instance should
-enable these flags to avoid different controller instances fighting over
-classless resources, which will result in unexpected and unknown behavior.
-
-The ingress class used by the {{site.kic_product_name}} to filter Ingress
-resources can be changed using the `CONTROLLER_INGRESS_CLASS`
-environment variable.
-
-```yaml
-spec:
-  template:
-    spec:
-      containers:
-      - name: kong-ingress-internal-controller
-        env:
-        - name: CONTROLLER_INGRESS_CLASS
-          value: kong-internal
-```
-
-#### Multiple unrelated {{site.kic_product_name}}s {#multiple-unrelated-controllers}
-
-In some deployments, one might use multiple {{site.kic_product_name}}s
-in the same Kubernetes cluster
-(e.g. one which serves public traffic, one which serves "internal" traffic).
-For such deployments, please ensure that in addition to different
-`ingress-class`, the `--election-id` is also different.
-
-In such deployments, the `kubernetes.io/ingress.class` annotation can be used on
-the following custom resources as well:
-
-- KongPlugin: To configure (global) plugins only in one of the Kong clusters.
-- KongConsumer: To create different consumers in different Kong clusters.
-- KongCredential: To create associated credentials for consumers.
-
-### konghq.com/plugins
-
-> Available since controller 0.8
-
-Kong's power comes from its plugin architecture, where plugins can modify
-the request and response or impose certain policies on the requests as they
-are proxied to your service.
-
-With the {{site.kic_product_name}}, plugins can be configured by creating
-`KongPlugin` Custom Resources and then associating them with an Ingress, Service,
-KongConsumer or a combination of those.
-
-Following is an example of how to use the annotation:
-
-```yaml
-konghq.com/plugins: high-rate-limit, docs-site-cors
-```
-
-Here, `high-rate-limit` and `docs-site-cors`
-are the names of the KongPlugin resources which
-should be applied to the Ingress rules defined in the
-Ingress resource on which the annotation is being applied.
-
-This annotation can also be applied to a Service resource in Kubernetes, which
-will result in the plugin being executed at Service-level in Kong,
-meaning the plugin will be
-executed for every request that is proxied, no matter which Route it came from.
-
-This annotation can also be applied to a KongConsumer resource,
-which results in the plugin being executed whenever the specific consumer
-accesses any of the defined APIs.
-
-Finally, this annotation can also be applied to a combination of the
-following resources:
-- **Ingress and KongConsumer**
-  If an Ingress resource and a KongConsumer resource share a plugin in the
-  `konghq.com/plugins` annotation then the plugin will be created for the
-  combination of those two resources in Kong.
-- **Service and KongConsumer** - Same as the above case, if you would like to give a specific consumer or - client of your service some special treatment, you can do so by applying - the same annotation to both of the resources. - -Please follow the -[Using the KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource) -guide for details on how this annotation can be used. - - -### konghq.com/path - -> Available since controller 0.8 - -This annotation can be used on a Service resource only. -This annotation can be used to prepend an HTTP path of a request, -before the request is forwarded. - -For example, if the annotation `konghq.com/path: "/baz"` is applied to a -Kubernetes Service `billings`, then any request that is routed to the -`billings` service will be prepended with `/baz` HTTP path. If the -request contains `/foo/something` as the path, then the service will -receive an HTTP request with path set as `/baz/foo/something`. - -### konghq.com/strip-path - -> Available since controller 0.8 - -This annotation can be applied to an Ingress resource and can take two values: -- `"true"`: If set to true, the part of the path specified in the Ingress rule - will be stripped out before the request is sent to the service. - For example, if the Ingress rule has a path of `/foo` and the HTTP request - that matches the Ingress rule has the path `/foo/bar/something`, then - the request sent to the Kubernetes service will have the path - `/bar/something`. -- `"false"`: If set to false, no path manipulation is performed. - -All other values are ignored. -Please note the quotes (`"`) around the boolean value. - -Sample usage: - -```yaml -konghq.com/strip-path: "true" -``` - -### konghq.com/preserve-host - -> Available since controller 0.8 - -This annotation can be applied to an Ingress resource and can take two values: -- `"true"`: If set to true, the `host` header of the request will be sent - as is to the Service in Kubernetes. -- `"false"`: If set to false, the `host` header of the request is not preserved. - -Please note the quotes (`"`) around the boolean value. - -Sample usage: - -```yaml -konghq.com/preserve-host: "true" -``` - -### konghq.com/https-redirect-status-code - -> Available since controller 0.8 - -By default, Kong sends HTTP Status Code 426 for requests -that need to be redirected to HTTPS. -This can be changed using this annotations. -Acceptable values are: -- 301 -- 302 -- 307 -- 308 -- 426 - -Any other value will be ignored. - -Sample usage: - -```yaml -konghq.com/https-redirect-status-code: "301" -``` - -Please note the quotes (`"`) around the integer value. - -### konghq.com/regex-priority - -> Available since controller 0.9 - -Sets the `regex_priority` setting to this value on the Kong route associated -with the Ingress resource. This controls the [matching evaluation -order](/gateway/latest/reference/proxy/#evaluation-order) for regex-based -routes. It accepts any integer value. Routes are evaluated in order of highest -priority to lowest. - -Sample usage: - -```yaml -konghq.com/regex-priority: "10" -``` - -Please note the quotes (`"`) around the integer value. - -### konghq.com/methods - -> Available since controller 0.9 - -Sets the `methods` setting on the Kong route associated with the Ingress -resource. This controls which request methods will match the route. Any -uppercase alpha ASCII string is accepted, though most users will use only -[standard methods](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods). 
-
-Sample usage:
-
-```yaml
-konghq.com/methods: "GET,POST"
-```
-
-### konghq.com/snis
-
-> Available since controller 1.1
-
-Sets the `snis` match criteria on the Kong route associated with this Ingress.
-When using route-attached plugins that execute during the certificate
-phase (for example, [Mutual TLS Authentication](/hub/kong-inc/mtls-auth/)),
-the `snis` annotation allows route matching based on the server name
-indication information sent in a client's TLS handshake.
-
-Sample usage:
-
-```yaml
-konghq.com/snis: "foo.example.com, bar.example.com"
-```
-
-### konghq.com/request-buffering
-
-> Available since controller 1.2
-
-Enables or disables request buffering on the Kong route associated with this
-Ingress.
-
-Sample usage:
-
-```yaml
-konghq.com/request-buffering: "false"
-```
-
-### konghq.com/response-buffering
-
-> Available since controller 1.2
-
-Enables or disables response buffering on the Kong route associated with this
-Ingress.
-
-Sample usage:
-
-```yaml
-konghq.com/response-buffering: "false"
-```
-
-### konghq.com/host-aliases
-
-> Available since controller 1.3
-
-Set additional hosts for routes created from rules on this Ingress.
-
-Sample usage:
-
-```yaml
-konghq.com/host-aliases: "example.com,example.net"
-```
-
-This annotation applies to all rules equally. An Ingress like this:
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: example-ingress
-  annotations:
-    konghq.com/host-aliases: "example.com,example.net"
-spec:
-  rules:
-  - host: "foo.example"
-    http:
-      paths:
-      - pathType: Prefix
-        path: "/foo"
-        backend:
-          service:
-            name: service1
-            port:
-              number: 80
-  - host: "bar.example"
-    http:
-      paths:
-      - pathType: Prefix
-        path: "/bar"
-        backend:
-          service:
-            name: service2
-            port:
-              number: 80
-```
-
-Results in two routes:
-
-```
-{"hosts":["foo.example", "example.com", "example.net"], "paths":["/foo"]}
-{"hosts":["bar.example", "example.com", "example.net"], "paths":["/bar"]}
-```
-
-{:.important}
-> To avoid creating overlapping routes, don't reuse the same path in multiple rules.
-
-### konghq.com/override
-
-> Available since controller 0.8
-
-This annotation can associate a KongIngress resource with
-an Ingress or a Service resource.
-It serves as a way to bridge the gap between a sparse Ingress API in Kubernetes
-and fine-grained control using the properties of Service, Route
-and Upstream entities in Kong.
-
-Please follow the
-[Using the KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource)
-guide for details on how to use this annotation.
-
-### konghq.com/protocol
-
-> Available since controller 0.8
-
-This annotation can be set on a Kubernetes Service resource and indicates
-the protocol that should be used by Kong to communicate with the service.
-In other words, the protocol is used for communication between a
-[Kong Service](/gateway/api/admin-ee/latest/#/Services/list-service/) and
-a Kubernetes Service, internally in the Kubernetes cluster.
-
-Accepted values are:
-- `http`
-- `https`
-- `grpc`
-- `grpcs`
-- `tcp`
-- `tls`
-
-### konghq.com/protocols
-
-> Available since controller 0.8
-
-This annotation sets the list of acceptable protocols for all the rules
-defined in the Ingress resource.
-The protocols are used for communication between
-Kong and the external client/user of the Service.
- -You usually want to set this annotation for the following two use-cases: -- You want to redirect HTTP traffic to HTTPS, in which case you will use - `konghq.com/protocols: "https"` -- You want to define gRPC routing, in which case you should use - `konghq.com/protocols: "grpc,grpcs"` - -### konghq.com/client-cert - -> Available since controller 0.8 - -This annotation sets the certificate and key-pair Kong should use to -authenticate itself against the upstream service, if the upstream service -is performing mutual-TLS (mTLS) authentication. - -The value of this annotation should be the name of the Kubernetes TLS Secret -resource which contains the TLS cert and key pair. - -Under the hood, the controller creates a Certificate in Kong and then -sets the -[`service.client_certificate`](/gateway/api/admin-ee/latest/#/Services/list-service/) -for the service. - -### konghq.com/host-header - -> Available since controller 0.9 - -Sets the `host_header` setting on the Kong upstream created to represent a -Kubernetes Service. By default, Kong upstreams set `Host` to the hostname or IP -address of an individual target (the Pod IP for controller-managed -configuration). This annotation overrides the default behavior and sends -the annotation value as the `Host` header value. - -If `konghq.com/preserve-host: true` is present on an Ingress (or -`route.preserve_host: true` is present in a linked KongIngress), it will take -precedence over this annotation, and requests to the application will use the -hostname in the Ingress rule. - -Sample usage: - -```yaml -konghq.com/host-header: "test.example.com" -``` - -### ingress.kubernetes.io/service-upstream - -By default, the {{site.kic_product_name}} distributes traffic amongst all the -Pods of a Kubernetes `Service` by forwarding the requests directly to -Pod IP addresses. One can choose the load-balancing strategy to use -by specifying a KongIngress resource. - -However, in some use-cases, the load-balancing should be left up -to `kube-proxy`, or a sidecar component in the case of Service Mesh deployments. - -Setting this annotation to a Service resource in Kubernetes will configure -the {{site.kic_product_name}} to directly forward -the traffic outbound for this Service -to the IP address of the service (usually the ClusterIP). - -`kube-proxy` can then decide how it wants to handle the request and route the -traffic accordingly. If a sidecar intercepts the traffic from the controller, -it can also route traffic as it sees fit in this case. - -Following is an example snippet you can use to configure this annotation -on a `Service` resource in Kubernetes, (please note the quotes around `true`): - -```yaml -annotations: - ingress.kubernetes.io/service-upstream: "true" -``` - -You need {{site.kic_product_name}} >= 0.6 for this annotation. diff --git a/app/kubernetes-ingress-controller/1.3.x/references/cli-arguments.md b/app/kubernetes-ingress-controller/1.3.x/references/cli-arguments.md deleted file mode 100644 index 04675924d95c..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/references/cli-arguments.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: CLI Arguments ---- - -Various settings and configurations of the controller can be tweaked -using CLI flags. - -## Environment variables - -Each flag defined in the table below can also be configured using -an environment variable. The name of the environment variable is `CONTROLLER_` -string followed by the name of flag in uppercase. 
- -For example, `--ingress-class` can be configured using the following -environment variable: - -``` -CONTROLLER_INGRESS_CLASS=kong-foobar -``` - -It is recommended that all the configuration is done via environment variables -and not CLI flags. - -## Flags - -Following table describes all the flags that are available: - -| Flag | Type | Default | Description | -|-------|------|---------|-------------| -| --admission-webhook-cert-file |`string` | `/admission-webhook/tls.crt` | Path to the PEM-encoded certificate file for TLS handshake.| -| --admission-webhook-key-file |`string` | `/admission-webhook/tls.key` | Path to the PEM-encoded private key file for TLS handshake.| -| --admission-webhook-cert |`string` | none | PEM-encoded certificate string for TLS handshake.| -| --admission-webhook-key |`string` | none | PEM-encoded private key string for TLS handshake.| -| --admission-webhook-listen |`string` | `off` | The address to start admission controller on (ip:port). Setting it to 'off' disables the admission controller.| -| --anonymous-reports |`string` | `true` | Send anonymized usage data to help improve Kong.| -| --apiserver-host |`string` | none | The address of the Kubernetes API server to connect to in the format of protocol://address:port, e.g., "http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted.| -| --disable-ingress-extensionsv1beta1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `extensions/v1beta1`.| -| --disable-ingress-networkingv1beta1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `networking/v1beta1`.| -| --disable-ingress-networkingv1 |`boolean` | `false` | Disable processing Ingress resources with apiVersion `networking/v1`.| -| --dump-config |`string` | none | Dump generated configuration to a temporary directory when set to `enabled`. 
When set to `sensitive`, dumps include certificate+key pairs and credentials.| -| --election-id |`string` | `ingress-controller-leader` | The name of ConfigMap (in the same namespace) to use to facilitate leader-election between multiple instances of the controller.| -| --ingress-class |`string` | `kong` | Ingress class name to use to filter Ingress and custom resources when multiple Ingress Controllers are running in the same Kubernetes cluster.| -| --kong-admin-ca-cert-file |`string` | none | Path to PEM-encoded CA certificate file to verify Kong's Admin SSL certificate.| -| --kong-admin-ca-cert |`string` | none | PEM-encoded CA certificate string to verify Kong's Admin SSL certificate.| -| --kong-admin-concurrency |`int` | `10` | Max number of concurrent requests sent to Kong's Admin API.| -| --kong-admin-filter-tag |`string` | `managed-by-ingress-controller` | The tag used to manage entities in Kong.| -| --kong-admin-header |`string` | none | Add a header (key:value) to every Admin API call, this flag can be used multiple times to specify multiple headers.| -| --kong-admin-token |`string` | none | Set the {{site.ee_product_name}} RBAC token to be used by the controller.| -| --kong-admin-tls-server-name |`string` | none | SNI name to use to verify the certificate presented by Kong in TLS.| -| --kong-admin-tls-skip-verify |`boolean` | `false` | Disable verification of TLS certificate of Kong's Admin endpoint.| -| --kong-admin-url |`string` | `http://localhost:8001` | The address of the Kong Admin URL to connect to in the format of `protocol://address:port`.| -| --kong-workspace |`string` | `default` | Workspace in {{site.ee_product_name}} to be configured.| -| --kong-custom-entities-secret |`string` | none | Secret containing custom entities to be populated in DB-less mode, takes the form `namespace/name`.| -| --log-format |`string` | `text` | Format of logs of the controller. Allowed values are `text` and `json`. | -| --log-level |`string` | `info` | Level of logging for the controller. Allowed values are `trace`, `debug`, `info`, `warn`, `error`, `fatal` and `panic`. | -| --enable-reverse-sync |`bool` | `false` | Enable reverse checks from Kong to Kubernetes. Use this option only if a human has edit access to Kong's Admin API. | -| --kubeconfig |`string` | none | Path to kubeconfig file with authorization and master location information.| -| --profiling |`boolean` | `true` | Enable profiling via web interface `host:port/debug/pprof/`. | -| --publish-service |`string` | none | The namespaces and name of the Kubernetes Service fronting the {{site.kic_product_name}} in the form of namespace/name. The controller will set the status of the Ingress resources to match the endpoints of this service. In reference deployments, this is kong/kong-proxy.| -| --publish-status-address |`string` | none | User customized address to be set in the status of ingress resources. 
The controller will set the endpoint records on the ingress using this address.| -| --process-classless-ingress-v1beta1 |`boolean` | `false` | Toggles whether the controller processes `extensions/v1beta1` and `networking/v1beta1` Ingress resources that have no `kubernetes.io/ingress.class` annotation.| -| --process-classless-ingress-v1 |`boolean` | `false` | Toggles whether the controller processes `networking/v1` Ingress resources that have no `kubernetes.io/ingress.class` annotation or class field.| -| --process-classless-kong-consumer |`boolean` | `false` | Toggles whether the controller processes KongConsumer resources that have no `kubernetes.io/ingress.class` annotation.| -| --stderrthreshold |`string` | `2` | logs at or above this threshold go to stderr.| -| --sync-period |`duration` | `10m` | Relist and confirm cloud resources this often.| -| --sync-rate-limit |`float32` | `0.3` | Define the sync frequency upper limit. | -| --update-status |`boolean` | `true` | Indicates if the ingress controller should update the Ingress status IP/hostname.| -| --update-status-on-shutdown |`boolean` | `true` | Indicates if the ingress controller should update the Ingress status IP/hostname when the controller is being stopped.| -| --version |`boolean` | `false` | Shows release information about the {{site.kic_product_name}}.| -| --watch-namespace |`string` | none | Namespace to watch for Ingress and custom resources. The default value of an empty string results in the controller watching for resources in all namespaces and configuring Kong accordingly.| -| --help |`boolean` | `false` | Shows this documentation on the CLI and exit.| diff --git a/app/kubernetes-ingress-controller/1.3.x/references/custom-resources.md b/app/kubernetes-ingress-controller/1.3.x/references/custom-resources.md deleted file mode 100644 index e0f7205e4fea..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/references/custom-resources.md +++ /dev/null @@ -1,419 +0,0 @@ ---- -title: Custom Resource Definitions ---- - -The Ingress Controller can configure Kong specific features -using several [Custom Resource Definitions(CRDs)][k8s-crd]. - -Following CRDs enables users to declaratively configure all aspects of Kong: - -- [**KongPlugin**](#kongplugin): This resource corresponds to - the [Plugin][kong-plugin] entity in Kong. -- [**KongIngress**](#kongingress): This resource provides fine-grained control - over all aspects of proxy behaviour like routing, load-balancing, - and health checking. It serves as an "extension" to the Ingress resources - in Kubernetes. -- [**KongConsumer**](#kongconsumer): - This resource maps to the [Consumer][kong-consumer] entity in Kong. -- [**TCPIngress**](#tcpingress): - This resource can configure TCP-based routing in Kong for non-HTTP - services running inside Kubernetes. -- [**KongCredential (Deprecated)**](#kongcredential-deprecated): - This resource maps to - a credential (key-auth, basic-auth, jwt, hmac-auth) that is associated with - a specific KongConsumer. - -## KongPlugin - -This resource provides an API to configure plugins inside Kong using -Kubernetes-style resources. - -Please see the [concept](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/#KongPlugin) -document for how the resource should be used. 
- -The following snippet shows the properties available in KongPlugin resource: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: - namespace: -disabled: # optionally disable the plugin in Kong -config: # configuration for the plugin - key: value -configFrom: - secretKeyRef: - name: - key: -plugin: # like key-auth, rate-limiting etc -``` - -- `config` contains a list of `key` and `value` - required to configure the plugin. - All configuration values specific to the type of plugin go in here. - Please read the documentation of the plugin being configured to set values - in here. For any plugin in Kong, anything that goes in the `config` JSON - key in the Admin API request, goes into the `config` YAML key in this resource. - Please use a valid JSON to YAML convertor and place the content under the - `config` key in the YAML above. -- `configFrom` contains a reference to a Secret and key, where the key contains - a complete JSON or YAML configuration. This should be used when the plugin - configuration contains sensitive information, such as AWS credentials in the - Lambda plugin or the client secret in the OIDC plugin. Only one of `config` - or `configFrom` may be used in a KongPlugin, not both at once. -- `plugin` field determines the name of the plugin in Kong. - This field was introduced in {{site.kic_product_name}} 0.2.0. - -**Please note:** validation of the configuration fields is left to the user -by default. It is advised to setup and use the admission validating controller -to catch user errors. - -The plugins can be associated with Ingress -or Service object in Kubernetes using `konghq.com/plugins` annotation. - -### Examples - -#### Applying a plugin to a service - -Given the following plugin: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - header_name: my-request-id - echo_downstream: true -plugin: correlation-id -``` - -It can be applied to a service by annotating like: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: myapp-service - labels: - app: myapp-service - annotations: - konghq.com/plugins: request-id -spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: myapp-service - selector: - app: myapp-service -``` - -#### Applying a plugin to an ingress - -The KongPlugin above can be applied to a specific ingress (route or routes): - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - konghq.com/plugins: request-id - kubernetes.io/ingress.class: kong -spec: - rules: - - host: example.com - http: - paths: - - path: /bar - backend: - serviceName: echo - servicePort: 80 -``` - -A plugin can also be applied to a specific KongConsumer by adding -`konghq.com/plugins` annotation to the KongConsumer resource. - -Please follow the -[Using the KongPlugin resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource) -guide for details on how to use this resource. 
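As noted above, a plugin can also be attached to a specific KongConsumer with the same `konghq.com/plugins` annotation. A minimal sketch, reusing the `request-id` plugin from the earlier examples (the consumer shown is illustrative):

```yaml
apiVersion: configuration.konghq.com/v1
kind: KongConsumer
metadata:
  name: consumer-team-x
  annotations:
    kubernetes.io/ingress.class: kong
    konghq.com/plugins: request-id
username: team-X
```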
- -#### Applying a plugin with a secret configuration - -The plugin above can be modified to store its configuration in a secret: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -configFrom: - secretKeyRef: - name: plugin-conf-secret - key: request-id -plugin: correlation-id -``` - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: plugin-conf-secret -stringData: - request-id: | - header_name: my-request-id - echo_downstream: true -type: Opaque -``` - -## KongClusterPlugin - -A `KongClusterPlugin` is same as `KongPlugin` resource. The only differences -are that it is a Kubernetes cluster-level resource instead of a namespaced -resource, and can be applied as a global plugin using labels. - -Please consult the [KongPlugin](#kongplugin) section for details. - -*Example:* - -KongClusterPlugin example: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongClusterPlugin -metadata: - name: request-id - annotations: - kubernetes.io/ingress.class: - labels: - global: "true" # optional, if set, then the plugin will be executed - # for every request that Kong proxies - # please note the quotes around true -config: - header_name: my-request-id -configFrom: - secretKeyRef: - name: - key: - namespace: -plugin: correlation-id -``` - -As with KongPlugin, only one of `config` or `configFrom` can be used. - -Setting the label `global` to `"true"` will apply the plugin globally in Kong, -meaning it will be executed for every request that is proxied via Kong. - -## KongIngress - -Ingress resource spec in Kubernetes can define routing policies -based on HTTP Host header and paths. -While this is sufficient in most cases, -sometimes, users may want more control over routing at the Ingress level. -`KongIngress` serves as an "extension" to Ingress resource. -It is not meant as a replacement to the -`Ingress` resource in Kubernetes. - -Please read the [concept](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/#kongingress) -document for why this resource exists and how it relates to the existing -Ingress resource. - -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and -[Route][kong-route] entities in Kong related to an Ingress resource -can be modified. - -Once a `KongIngress` resource is created, it needs to be associated with -an Ingress or Service resource using the following annotation: - -```yaml -konghq.com/override: kong-ingress-resource-name -``` - -Specifically, - -- To override any properties related to health-checking, load-balancing, - or details specific to a service, add the annotation to the Kubernetes - Service that is being exposed via the Ingress API. -- To override routing configuration (like protocol or method based routing), - add the annotation to the Ingress resource. - -Please follow the -[Using the KongIngress resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-kongingress-resource) -guide for details on how to use this resource. 
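As a small illustration of the association described above (resource names are illustrative), the annotation is added to the Kubernetes object that the KongIngress should modify; here it is a Service, so service-level settings such as load balancing and health checks would apply:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: echo
  annotations:
    konghq.com/override: configuration-demo
spec:
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: echo
```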
- -For reference, the following is a complete spec for KongIngress: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: configuration-demo -upstream: - slots: 10 - hash_on: none - hash_fallback: none - healthchecks: - threshold: 25 - active: - concurrency: 10 - healthy: - http_statuses: - - 200 - - 302 - interval: 0 - successes: 0 - http_path: "/" - timeout: 1 - unhealthy: - http_failures: 0 - http_statuses: - - 429 - interval: 0 - tcp_failures: 0 - timeouts: 0 - passive: - healthy: - http_statuses: - - 200 - successes: 0 - unhealthy: - http_failures: 0 - http_statuses: - - 429 - - 503 - tcp_failures: 0 - timeouts: 0 -proxy: - protocol: http - path: / - connect_timeout: 10000 - retries: 10 - read_timeout: 10000 - write_timeout: 10000 -route: - methods: - - POST - - GET - regex_priority: 0 - strip_path: false - preserve_host: true - protocols: - - http - - https -``` - -## TCPIngress - -The Ingress resource in Kubernetes is HTTP-only. -This custom resource is modeled similar to the Ingress resource but for -TCP and TLS SNI based routing purposes: - -```yaml -apiVersion: configuration.konghq.com/v1beta1 -kind: TCPIngress -metadata: - name: - namespace: - annotations: - kubernetes.io/ingress.class: -spec: - rules: - - host: - port: - backend: - serviceName: - servicePort: -``` - -If `host` is not specified, then port-based TCP routing is performed. Kong -doesn't care about the content of TCP stream in this case. - -If `host` is specified, then Kong expects the TCP stream to be TLS-encrypted -and Kong will terminate the TLS session based on the SNI. -Also note that, the port in this case should be configured with `ssl` parameter -in Kong. - -## KongConsumer - -This custom resource configures a consumer in Kong: - -The following snippet shows the field available in the resource: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: - namespace: - annotations: - kubernetes.io/ingress.class: -username: -custom_id: -``` - -An example: - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: consumer-team-x - annotations: - kubernetes.io/ingress.class: kong -username: team-X -``` - -When this resource is created, a corresponding consumer entity will be -created in Kong. - -Consumers' `username` and `custom_id` values must be unique across the Kong -cluster. While KongConsumers exist in a specific Kubernetes namespace, -KongConsumers from all namespaces are combined into a single Kong -configuration, and no KongConsumers with the same `kubernetes.io/ingress.class` -may share the same `username` or `custom_id` value. - -## KongCredential (Deprecated) - -This custom resource can be used to configure a consumer specific -entities in Kong. -The resource reference the KongConsumer resource via the `consumerRef` key. - -The validation of the config object is left up to the user. 
- -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongCredential -metadata: - name: credential-team-x -consumerRef: consumer-team-x -type: key-auth -config: - key: 62eb165c070a41d5c1b58d9d3d725ca1 -``` - -The following credential types can be provisioned using the KongCredential -resource: - -- `key-auth` for [Key authentication](/hub/kong-inc/key-auth/) -- `basic-auth` for [Basic authentication](/hub/kong-inc/basic-auth/) -- `hmac-auth` for [HMAC authentication](/hub/kong-inc/hmac-auth/) -- `jwt` for [JWT based authentication](/hub/kong-inc/jwt/) -- `oauth2` for [Oauth2 Client credentials](/hub/kong-inc/oauth2/) -- `acl` for [ACL group associations](/hub/kong-inc/acl/) - -Please ensure that all fields related to the credential in Kong -are present in the definition of KongCredential's `config` section. - -Please refer to the -[using the Kong Consumer and Credential resource](/kubernetes-ingress-controller/{{page.release}}/guides/using-consumer-credential-resource) -guide for details on how to use this resource. - -[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[kong-consumer]: /gateway/api/admin-ee/latest/#/Consumers/list-consumer/ -[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin -[kong-upstream]: /gateway/api/admin-ee/latest/#/Upstreams/list-upstream/ -[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/ -[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/ diff --git a/app/kubernetes-ingress-controller/1.3.x/references/plugin-compatibility.md b/app/kubernetes-ingress-controller/1.3.x/references/plugin-compatibility.md deleted file mode 100644 index 66250f3b167f..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/references/plugin-compatibility.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Plugin Compatibility ---- - -DB-less mode is the preferred choice for controller-managed Kong and Kong -Enterprise clusters. However, not all plugins are available in DB-less mode. -Review the table below to check if a plugin you wish to use requires a -database. - -Note that some DB-less compatible plugins have some limitations or require -non-default configuration for -[compatibility](/gateway/latest/reference/db-less-and-declarative-config/#plugin-compatibility). 
- -## Kong - -| Plugin | Kong | Kong (DB-less) | -|-------------------------|-------------------------------|-------------------------------| -| acl | | | -| aws-lambda | | | -| azure-functions | | | -| basic-auth | | | -| bot-detection | | | -| correlation-id | | | -| cors | | | -| datadog | | | -| file-log | | | -| hmac-auth | | | -| http-log | | | -| ip-restriction | | | -| jwt | | | -| key-auth | | | -| oauth2 | | | -| post-function | | | -| pre-function | | | -| prometheus | | | -| proxy-cache | | | -| rate-limiting | | | -| request-termination | | | -| request-transformer | | | -| response-ratelimiting | | | -| response-transformer | | | -| syslog | | | -| tcp-log | | | -| udp-log | | | -| zipkin | | | - -## {{site.ee_product_name}} - -{% include md/enterprise/k8s-image-note.md %} - -| Plugin | Kong for Kubernetes with {{site.ee_product_name}} | Kong for Kubernetes Enterprise | -|----------------------------------|--------------------------------------------|-------------------------------------------| -| acl | | | -| aws-lambda | | | -| azure-functions | | | -| basic-auth | | | -| bot-detection | | | -| correlation-id | | | -| cors | | | -| datadog | | | -| file-log | | | -| hmac-auth | | | -| http-log | | | -| ip-restriction | | | -| jwt | | | -| key-auth | | | -| oauth2 | | | -| post-function | | | -| pre-function | | | -| prometheus | | | -| proxy-cache | | | -| rate-limiting | | | -| request-termination | | | -| request-transformer | | | -| response-ratelimiting | | | -| response-transformer | | | -| syslog | | | -| tcp-log | | | -| udp-log | | | -| zipkin | | | -| application-registration | | 1 | -| canary release | | | -| collector | | | -| degraphql | | | -| exit-transformer | | | -| forward-proxy | | | -| graphql-proxy-cache-advanced | | | -| graphql-rate-limiting-advanced | | | -| jwt-signer | | | -| kafka-log | | | -| kafka-upstream | | | -| key-auth-enc | | | -| ldap-auth-advanced | | | -| mtls-auth | | | -| oauth2-introspection | | | -| openid-connect | | | -| proxy-cache-advanced | | | -| rate-limiting-advanced | | | -| request-transformer-advanced | | 2 | -| request-validator | | | -| response-transformer-advanced | | | -| route-transformer-advanced | | | -| statsd-advanced | | 3 | -| vault-auth | | | - -1 Only used with Dev Portal - -2 request-transformer now has feature parity with - request-transformer-advanced. request-transformer-advanced remains only for - compatibility with existing configurations. - -3 Only used with Vitals diff --git a/app/kubernetes-ingress-controller/1.3.x/references/version-compatibility.md b/app/kubernetes-ingress-controller/1.3.x/references/version-compatibility.md deleted file mode 100644 index e0d1965afe0c..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/references/version-compatibility.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Version Compatibility ---- - -Kong's Kubernetes ingress controller is compatible with different flavors of Kong. -The following sections detail on compatibility between versions. - -## Kong - -By Kong, we are here referring to the official distribution of the Open-Source -{{site.base_gateway}}. 
- -| {{site.kic_product_name}} | <= 0.0.4 | 0.0.5 | 0.1.x | 0.2.x | 0.3.x | 0.4.x | 0.5.x | 0.6.x | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | 1.1.x | 1.2.x | 1.3.x | -|:------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kong 0.13.x | | | | | | | | | | | | | | | | | -| Kong 0.14.x | | | | | | | | | | | | | | | | | -| Kong 1.0.x | | | | | | | | | | | | | | | | | -| Kong 1.1.x | | | | | | | | | | | | | | | | | -| Kong 1.2.x | | | | | | | | | | | | | | | | | -| Kong 1.3.x | | | | | | | | | | | | | | | | | -| Kong 1.4.x | | | | | | | | | | | | | | | | | -| Kong 1.5.x | | | | | | | | | | | | | | | | | -| Kong 2.0.x | | | | | | | | | | | | | | | | | -| Kong 2.1.x | | | | | | | | | | | | | | | | | -| Kong 2.2.x | | | | | | | | | | | | | | | | | -| Kong 2.3.x | | | | | | | | | | | | | | | | | -| Kong 2.4.x | | | | | | | | | | | | | | | | | -| Kong 2.5.x | | | | | | | | | | | | | | | | | -| Kong 2.6.x | | | | | | | | | | | | | | | | | - -## {{site.ee_product_name}} - -{{site.ee_product_name}} is the official enterprise distribution, which includes all -other enterprise functionality, built on top of the Open-Source {{site.base_gateway}}. - -| {{site.kic_product_name}} | 0.0.5 | 0.1.x | 0.2.x | 0.3.x | 0.4.x | 0.5.x | 0.6.x | 0.7.x | 0.8.x | 0.9.x | 0.10.x | 1.0.x | 1.1.x | 1.2.x | 1.3.x | -|:------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| {{site.ee_product_name}} 0.32-x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.33-x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.34-x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.35-x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 0.36-x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 1.3.x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 1.5.x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.1.x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.2.x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.3.x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.4.x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.5.x | | | | | | | | | | | | | | | | -| {{site.ee_product_name}} 2.6.x | | | | | | | | | | | | | | | | - -## Kubernetes - -| {{site.kic_product_name}} | 0.9.x | 0.10.x | 1.0.x | 1.1.x | 1.2.x | 1.3.x | -|:--------------------------|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:|:---------------------------:| -| Kubernetes 1.13 | | | | | | | -| Kubernetes 1.14 | | | | | | | -| Kubernetes 1.15 | | | | | | | -| Kubernetes 1.16 | | | | | | | -| Kubernetes 
1.17 | | | | | | | -| Kubernetes 1.18 | | | | | | | -| Kubernetes 1.19 | | | | | | | -| Kubernetes 1.20 | | | | | | | -| Kubernetes 1.21 | | | | | | | diff --git a/app/kubernetes-ingress-controller/1.3.x/troubleshooting.md b/app/kubernetes-ingress-controller/1.3.x/troubleshooting.md deleted file mode 100644 index 0d9630d56196..000000000000 --- a/app/kubernetes-ingress-controller/1.3.x/troubleshooting.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -title: Debug & Troubleshooting ---- - -## Debug - -Using the flag `--v=XX` it is possible to increase the level of logging. -In particular: - -- `--v=3` shows details about the service, Ingress rule, and endpoint changes - -## Authentication to the Kubernetes API Server - -A number of components are involved in the authentication process and the first step is to narrow -down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file. -Both authentications must work: - -```text -+-------------+ service +------------+ -| | authentication | | -+ apiserver +<-------------------+ ingress | -| | | controller | -+-------------+ +------------+ - -``` - -## Service authentication - -The Ingress controller needs information from API server to configure Kong. -Therefore, authentication is required, which can be achieved in two different ways: - -1. **Service Account**: This is recommended - because nothing has to be configured. - The Ingress controller will use information provided by the system - to communicate with the API server. - See 'Service Account' section for details. -1. **Kubeconfig file**: In some Kubernetes environments - service accounts are not available. - In this case, a manual configuration is required. - The Ingress controller binary can be started with the `--kubeconfig` flag. - The value of the flag is a path to a file specifying how - to connect to the API server. Using the `--kubeconfig` - does not require the flag `--apiserver-host`. - The format of the file is identical to `~/.kube/config` - which is used by `kubectl` to connect to the API server. - See 'kubeconfig' section for details. - -## Discovering API-server - -Using this flag `--apiserver-host=http://localhost:8080`, -it is possible to specify an unsecured API server or -reach a remote Kubernetes cluster using -[kubectl proxy](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/#using-kubectl-proxy). -Please do not use this approach in production. - -In the diagram below you can see the full authentication flow with all options, starting with the browser -on the lower left hand side. - -```text - -Kubernetes Workstation -+---------------------------------------------------+ +------------------+ -| | | | -| +-----------+ apiserver +------------+ | | +------------+ | -| | | proxy | | | | | | | -| | apiserver | | ingress | | | | ingress | | -| | | | controller | | | | controller | | -| | | | | | | | | | -| | | | | | | | | | -| | | service account/ | | | | | | | -| | | kubeconfig | | | | | | | -| | +<-------------------+ | | | | | | -| | | | | | | | | | -| +------+----+ kubeconfig +------+-----+ | | +------+-----+ | -| |<--------------------------------------------------------| | -| | | | -+---------------------------------------------------+ +------------------+ -``` - -## Service Account - -If using a service account to connect to the API server, Dashboard expects the file -`/var/run/secrets/kubernetes.io/serviceaccount/token` to be present. 
It provides a secret -token that is required to authenticate with the API server. - -Verify with the following commands: - -```shell -# start a container that contains curl -$ kubectl run test --image=tutum/curl -- sleep 10000 - -# check that container is running -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -test-701078429-s5kca 1/1 Running 0 16s - -# check if secret exists -$ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ -ca.crt -namespace -token - -# get service IP of master -$ kubectl get services -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kubernetes 10.0.0.1 443/TCP 1d - -# check base connectivity from cluster inside -$ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 -Unauthorized - -# connect using tokens -$ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token) -$ echo $TOKEN_VALUE -eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A -$ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization: Bearer $TOKEN_VALUE" https://10.0.0.1 -{ - "paths": [ - "/api", - "/api/v1", - "/apis", - "/apis/apps", - "/apis/apps/v1alpha1", - "/apis/authentication.k8s.io", - "/apis/authentication.k8s.io/v1beta1", - "/apis/authorization.k8s.io", - "/apis/authorization.k8s.io/v1beta1", - "/apis/autoscaling", - "/apis/autoscaling/v1", - "/apis/batch", - "/apis/batch/v1", - "/apis/batch/v2alpha1", - "/apis/certificates.k8s.io", - "/apis/certificates.k8s.io/v1alpha1", - "/apis/extensions", - "/apis/extensions/v1beta1", - "/apis/policy", - "/apis/policy/v1alpha1", - "/apis/rbac.authorization.k8s.io", - "/apis/rbac.authorization.k8s.io/v1alpha1", - "/apis/storage.k8s.io", - "/apis/storage.k8s.io/v1beta1", - "/healthz", - "/healthz/ping", - "/logs", - "/metrics", - "/swaggerapi/", - "/ui/", - "/version" - ] -} -``` - -If it is not working, there are two possible reasons: - -1. The contents of the tokens are invalid. - Find the secret name: - - ```bash - kubectl get secrets --field-selector=type=kubernetes.io/service-account-token - ``` - Delete the secret: - - ```bash - kubectl delete secret {SECRET_NAME} - ``` - - It will automatically be recreated. -1. You have a non-standard Kubernetes installation - and the file containing the token may not be present. - -The API server will mount a volume containing this file, -but only if the API server is configured to use -the ServiceAccount admission controller. -If you experience this error, -verify that your API server is using the ServiceAccount admission controller. -If you are configuring the API server by hand, -you can set this with the `--admission-control` parameter. -Please note that you should use other admission controllers as well. -Before configuring this option, please read about admission controllers. - -More information: - -- [User Guide: Service Accounts](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) -- [Cluster Administrator Guide: Managing Service Accounts](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) - -## Kubeconfig - -If you want to use a kubeconfig file for authentication, -follow the deploy procedure and -add the flag `--kubeconfig=/etc/kubernetes/kubeconfig.yaml` to the deployment. 
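A rough sketch of what that could look like in the controller Deployment spec (this is a fragment only; the Secret name and mount path are assumptions and should match however you distribute the kubeconfig file):

```yaml
spec:
  template:
    spec:
      containers:
      - name: ingress-controller
        args:
        - --kubeconfig=/etc/kubernetes/kubeconfig.yaml
        volumeMounts:
        - name: kubeconfig
          mountPath: /etc/kubernetes
          readOnly: true
      volumes:
      - name: kubeconfig
        secret:
          secretName: kong-kubeconfig   # hypothetical Secret containing kubeconfig.yaml
```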
- -## Dumping generated Kong configuration - -If the controller generates configuration that it cannot apply to Kong -successfully, reviewing the generated configuration manually and/or applying it -in a test environment can help locate potential causes. - -Under normal operation, the controller does not store generated configuration; -it is only sent to Kong's Admin API. The `--dump-config` flag enables a -diagnostic mode where the controller also saves generated configuration to a -temporary file. To use the diagnostic mode: - -1. Set the `--dump-config` flag (or `CONTROLLER_DUMP_CONFIG` environment - variable) to either `enabled` or `sensitive`. `enabled` produces a redacted - configuration that omits certificate configuration and credentials, suitable - for sharing with Kong support. `sensitive` dumps the complete configuration - exactly as it is sent to the Admin API. -1. Check controller logs for the dump location: - ```bash - kubectl logs PODNAME -c ingress-controller | grep "config dumps" - ``` -1. (Optional) Make a change to a Kubernetes resource that you know will - reproduce the issue. If you are unsure what change caused the issue - originally, you can omit this step. -1. Copy dumped configuration out of the controller for local review: - - ```bash - kubectl cp PODNAME:/path/to/dump/last_bad.json /tmp/last_bad.json -c ingress-controller - ``` - - If the controller successfully applied configuration - before the failure, you can also look at `last_good.json`. - -Once you have dumped configuration, take one of the following -approaches to isolate issues: - -- If you know of a specific Kubernetes resource change that reproduces the - issue, diffing `last_good.json` and `last_bad.json` will show the change - the controller is trying to apply unsuccessfully. -- You can apply dumped configuration via the `/config` Admin API endpoint - (DB-less mode) or using decK (DB-backed mode) to a test instance not managed - by the ingress controller. This approach lets you review requests - and responses (passing `--verbose 2` to decK will show all requests) and - add debug Kong Lua code when controller requests result in an - unhandled error (500 response). - -## Inspecting network traffic with a tcpdump sidecar - -Inspecting network traffic allows you to review traffic between the ingress -controller and Kong admin API and/or between the Kong proxy and upstream -applications. You can use this in situations where logged information does not -provide you sufficient data on the contents of requests and you wish to see -exactly what was sent over the network. - -Although you cannot install and use tcpdump within the controller -or Kong containers, you can add a tcpdump sidecar to your Pod's containers. The -sidecar will be able to sniff traffic from other containers in the Pod. You can -edit your Deployment (to add the sidecar to all managed Pods) or a single Pod -and add the following under the `containers` section of the Pod spec: - -```yaml -- name: tcpdump - securityContext: - runAsUser: 0 - image: corfr/tcpdump - command: - - /bin/sleep - - infinity -``` - -If you are using the Kong Helm chart, you can alternately add this to the -`sidecarContainers` section of values.yaml. - -Once the sidecar is running, you can use `kubectl exec -it POD_NAME -c tcpdump` -and run a capture. 
For example, to capture traffic between the controller and -Kong admin API: - -```bash -tcpdump -npi any -s0 -w /tmp/capture.pcap host 127.0.0.1 and port 8001 -``` - -or between Kong and an upstream application with endpoints `10.0.0.50` and -`10.0.0.51`: - -```bash -tcpdump -npi any -s0 -w /tmp/capture.pcap host 10.0.0.50 or host 10.0.0.51 -``` - -Once you've replicated the issue, you can stop the capture, exit the -container, and use `kubectl cp` to download the capture from the tcpdump -container to a local system for review with -[Wireshark](https://www.wireshark.org/). - -Note that you will typically need to temporarily disable TLS to inspect -application-layer traffic. If you have access to the server's private keys you -can [decrypt TLS](https://wiki.wireshark.org/TLS#TLS_Decryption), though this -does not work if the session uses an ephemeral cipher (neither the controller -nor Kong proxy have support for dumping session secrets). diff --git a/app/kubernetes-ingress-controller/2.0.x/concepts/custom-resources.md b/app/kubernetes-ingress-controller/2.0.x/concepts/custom-resources.md deleted file mode 100644 index a6c93f48cfeb..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/concepts/custom-resources.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Custom Resources ---- - -[Custom Resources][k8s-crd] in Kubernetes allow controllers -to extend Kubernetes-style -declarative APIs that are specific to certain applications. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -The {{site.kic_product_name}} uses the `configuration.konghq.com` API group -for storing configuration specific to Kong. - -The following CRDs allow users to declaratively configure all aspects of Kong: - -- [**KongIngress**](#kongingress) -- [**KongPlugin**](#kongplugin) -- [**KongClusterPlugin**](#kongclusterplugin) -- [**KongConsumer**](#kongconsumer) -- [**TCPIngress**](#tcpingress) -- [**UDPIngress**](#udpingress) - -## KongIngress - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, `KongIngress` Custom Resource is used as an -"extension" to the existing Ingress API to provide fine-grained control -over proxy behavior. -In other words, `KongIngress` works in conjunction with -the existing Ingress resource and extends it. -It is not meant as a replacement for the `Ingress` resource in Kubernetes. -Using `KongIngress`, all properties of [Upstream][kong-upstream], -[Service][kong-service] and [Route][kong-route] -entities in Kong related to an Ingress resource can be modified. - -Once a `KongIngress` resource is created, you can use the `configuration.konghq.com` -annotation to associate the `KongIngress` resource with an `Ingress` or a `Service` -resource: - -- When the annotation is added to the `Ingress` resource, the routing - configurations are updated, meaning all routes associated with the annotated - `Ingress` are updated to use the values defined in the `KongIngress`'s route - section. -- When the annotation is added to a `Service` resource in Kubernetes, - the corresponding `Service` and `Upstream` in Kong are updated to use the - `proxy` and `upstream` blocks as defined in the associated - `KongIngress` resource. 
- -The below diagram shows how the resources are linked -with one another: - -![Associating Kong Ingress](/assets/images/products/kubernetes-ingress-controller/kong-ingress-association.png "Associating Kong Ingress") - -## KongPlugin - -Kong is designed around an extensible [plugin][kong-plugin] -architecture and comes with a -wide variety of plugins already bundled inside it. -These plugins can be used to modify the request/response or impose restrictions -on the traffic. - -Once this resource is created, the resource needs to be associated with an -`Ingress`, `Service`, or `KongConsumer` resource in Kubernetes. -For more details, please read the reference documentation on `KongPlugin`. - -The below diagram shows how you can link `KongPlugin` resource to an -`Ingress`, `Service`, or `KongConsumer`: - -| | | -:-:|:-: -![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association1.png)|![](/assets/images/products/kubernetes-ingress-controller/kong-plugin-association2.png) - -## KongClusterPlugin - -_This resource requires the [`kubernetes.io/ingress.class` annotation](/kubernetes-ingress-controller/{{page.release}}/references/annotations/)._ - -KongClusterPlugin resource is exactly same as KongPlugin, except that it is a -Kubernetes cluster-level resources instead of being a namespaced resource. -This can help when the configuration of the plugin needs to be centralized -and the permissions to add/update plugin configuration rests with a different -persona than application owners. - -This resource can be associated with `Ingress`, `Service` or `KongConsumer` -and can be used in the exact same way as KongPlugin. - -A namespaced KongPlugin resource takes priority over a -KongClusterPlugin with the same name. - -## KongConsumer - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This custom resource configures `Consumers` in Kong. -Every `KongConsumer` resource in Kubernetes directly translates to a -[Consumer][kong-consumer] object in Kong. - -## TCPIngress - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This Custom Resource is used for exposing non-HTTP -and non-GRPC services running inside Kubernetes to -the outside world via Kong. This proves to be useful when -you want to use a single cloud LoadBalancer for all kinds -of traffic into your Kubernetes cluster. - -It is very similar to the Ingress resource that ships with Kubernetes. - -## UDPIngress - -_This resource requires the `kubernetes.io/ingress.class` annotation. Its value -must match the value of the controller's `--ingress-class` argument, which is -"kong" by default._ - -This Custom Resource is used for exposing [UDP][udp] services -running inside Kubernetes to the outside world via Kong. - -This is useful for services such as DNS servers, Game Servers, -VPN software and a variety of other applications. 
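A minimal UDPIngress sketch, modeled on the TCPIngress shape shown earlier in this document (the service name and ports are illustrative, and the chosen port generally also needs a corresponding UDP stream listen configured on the Kong proxy):

```yaml
apiVersion: configuration.konghq.com/v1beta1
kind: UDPIngress
metadata:
  name: dns-example
  annotations:
    kubernetes.io/ingress.class: kong
spec:
  rules:
  - port: 9999                # port Kong listens on for UDP traffic
    backend:
      serviceName: coredns    # illustrative UDP Service inside the cluster
      servicePort: 53
```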
- -[udp]:https://datatracker.ietf.org/doc/html/rfc768 -[k8s-crd]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[kong-consumer]: /gateway/api/admin-ee/latest/#/Consumers/list-consumer/ -[kong-plugin]: /gateway/api/admin-ee/latest/#/Plugins/list-plugin -[kong-route]: /gateway/api/admin-ee/latest/#/Routes/list-route/ -[kong-service]: /gateway/api/admin-ee/latest/#/Services/list-service/ -[kong-upstream]: /gateway/api/admin-ee/latest/#/Upstreams/list-upstream/ diff --git a/app/kubernetes-ingress-controller/2.0.x/concepts/deployment.md b/app/kubernetes-ingress-controller/2.0.x/concepts/deployment.md deleted file mode 100644 index 564272890750..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/concepts/deployment.md +++ /dev/null @@ -1,306 +0,0 @@ ---- -title: Kong Ingress Controller Deployment ---- - -The {{site.kic_product_name}} is designed to be deployed in a variety of ways -based on uses-cases. This document explains various components involved -and choices one can make as per the specific use-case. - -- [**Kubernetes Resources**](#kubernetes-resources): - Various Kubernetes resources required to run the {{site.kic_product_name}}. -- [**Deployment options**](#deployment-options): - A high-level explanation of choices that one should consider and customize - the deployment to best serve a specific use case. - -## Kubernetes Resources - -The following resources are used to run the {{site.kic_product_name}}: - -- [Namespace](#namespace) -- [Custom resources](#custom-resources) -- [RBAC permissions](#rbac-permissions) -- [Ingress Controller Deployment](#ingress-controller-deployment) -- [Kong Proxy service](#kong-proxy-service) -- [Database deployment and migrations](#database-deployment-and-migration) - -These resources are created if the reference deployment manifests are used -to deploy the {{site.kic_product_name}}. -The resources are explained below for users to gain an understanding of how -they are used, so that they can be tweaked as necessary for a specific use-case. - -### Namespace - -> optional - -The {{site.kic_product_name}} can be deployed in any [namespace][k8s-namespace]. -If {{site.kic_product_name}} is being used to proxy traffic for all namespaces -in a Kubernetes cluster, which is generally the case, -it is recommended that it is installed in a dedicated -`kong` namespace but it is not required to do so. - -The example deployments present in this repository automatically create a `kong` -namespace and deploy resources into that namespace. - -### Custom Resources - -> required - -The Ingress resource in Kubernetes is a fairly narrow and ambiguous API, and -doesn't offer resources to describe the specifics of proxying. -To overcome this limitation, custom resources are used as an -"extension" to the existing Ingress API. - -A few custom resources are bundled with the {{site.kic_product_name}} to -configure settings that are specific to Kong and provide fine-grained control -over the proxying behavior. - -Please refer to the [custom resources](/kubernetes-ingress-controller/{{page.release}}/concepts/custom-resources/) -concept document for details. - -### RBAC permissions - -> required - -The {{site.kic_product_name}} communicates with the Kubernetes API-server and -dynamically configures Kong to automatically load balance across pods -of a service as any service is scaled in our out. - -For this reason, it requires [RBAC][k8s-rbac] permissions to access resources -stored in the Kubernetes object store. 
- -It needs read permissions (get,list,watch) -on the following Kubernetes resources: - -- Endpoints -- Nodes -- Pods -- Secrets -- Ingress -- KongPlugins -- KongConsumers -- KongIngress - -By default, the controller listens for events and above resources across -all namespaces and will need access to these resources at the cluster level -(using `ClusterRole` and `ClusterRoleBinding`). - -In addition to these, it needs: - -- Create a ConfigMap and read and update ConfigMap for to facilitate - leader-election. Please read this [document](/kubernetes-ingress-controller/{{page.release}}/concepts/ha-and-scaling/) - for more details. -- Update permission on the Ingress resource to update the status of - the Ingress resource. - -If the Ingress Controller is listening for events on a single namespace, -these permissions can be updated to restrict these permissions to a specific -namespace using `Role` and `RoleBinding resources`. - -In addition to these, it is necessary to create a `ServiceAccount`, which -has the above permissions. The Ingress Controller Pod then has this -`ServiceAccount` association. This gives the Ingress Controller process -necessary authentication and authorization tokens to communicate with the -Kubernetes API-server. - -[rbac.yaml](https://github.com/Kong/kubernetes-ingress-controller/tree/v{{ page.version }}/config/rbac) -contains the permissions needed for the {{site.kic_product_name}} to operate correctly. - -### Ingress Controller deployment - -> required - -Kong Ingress deployment consists of the Ingress Controller deployed alongside -Kong. The deployment will be different depending on if a database is being -used or not. - -The deployment(s) is the core which actually runs the {{site.kic_product_name}}. - -See the [database](#database) section below for details. - -### Kong Proxy service - -> required - -Once the {{site.kic_product_name}} is deployed, one service is needed to -expose Kong outside the Kubernetes cluster so that it can receive all traffic -that is destined for the cluster and route it appropriately. -`kong-proxy` is a Kubernetes service which points to the Kong pods which are -capable of proxying request traffic. This service will be usually of type -`LoadBalancer`, however it is not required to be such. -The IP address of this service should be used to configure DNS records -of all the domains that Kong should be proxying, to route the traffic to Kong. - -### Database deployment and migration - -> optional - -The {{site.kic_product_name}} can run with or without a database. -If a database is being deployed, then following resources are required: - -- A `StatefulSet` which runs a PostgreSQL pod backed with a `PersistentVolume` - to store Kong's configuration. -- An internal `Service` which resolves to the PostgreSQL pod. This ensures - that Kong can find the PostgreSQL instance using DNS inside - the Kubernetes cluster. -- A batch `Job` to run schema migrations. This is required to be executed once - to install bootstrap Kong's database schema. - Please note that on an any upgrade for Kong version, another `Job` will - need to be created if the newer version contains any migrations. - -To figure out if you should be using a database or not, please refer to the -[database](#database) section below. 
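A rough sketch of such a migrations Job, assuming an internal PostgreSQL Service named `postgres` in the `kong` namespace and a matching Kong image tag (the reference manifests in the repository remain the authoritative version):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: kong-migrations
  namespace: kong
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - name: kong-migrations
        image: kong:2.5                                  # match the Kong version being deployed
        command: ["kong", "migrations", "bootstrap"]
        env:
        - name: KONG_DATABASE
          value: "postgres"
        - name: KONG_PG_HOST
          value: postgres.kong.svc.cluster.local         # assumed internal PostgreSQL Service
```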
- -## Deployment options - -Following are the difference options to consider while deploying the -{{site.kic_product_name}} for your specific use case: - -- [**Kubernetes Service Type**](#kubernetes-service-types): - Chose between Load Balancer vs Node-Port -- [**Database**](#database): - Backing Kong with a Database or running without a database -- [**Multiple Ingress Controllers**](#multiple-ingress-controllers): - Running multiple {{site.kic_product_name}}s inside the same Kubernetes cluster -- [**Runtime**](#runtime): - Using Kong or {{site.ee_product_name}} (for {{site.ee_product_name}} customers) - -### Kubernetes Service Types - -Once deployed, any Ingress Controller needs to be exposed outside the -Kubernetes cluster to start accepting external traffic. -In Kubernetes, `Service` abstraction is used to expose any application -to the rest of the cluster or outside the cluster. - -If your Kubernetes cluster is running in a cloud environment, where -Load Balancers can be provisioned with relative ease, it is recommended -that you use a Service of type `LoadBalancer` to expose Kong to the outside -world. For the Ingress Controller to function correctly, it is also required -that a L4 (or TCP) Load Balancer is used and not an L7 (HTTP(s)) one. - -If your Kubernetes cluster doesn't support a service of type `LoadBalancer`, -then it is possible to use a service of type `NodePort`. - -### Database - -Until Kong 1.0, a database was required to run Kong. -Kong 1.1 introduced a new mode, DB-less, in which Kong can be configured -using a config file, and removes the need to use a database. - -It is possible to deploy and run the {{site.kic_product_name}} with or without a -database. The choice depends on the specific use-case and results in no -loss of functionality. - -#### Without a database - -In DB-less deployments, Kong's Kubernetes ingress controller runs -alongside and dynamically configures -Kong as per the changes it receives from the Kubernetes API server. - -Following figure shows how this deployment looks like: - -![Kong DB-less](/assets/images/products/kubernetes-ingress-controller/dbless-deployment.png "Kong DB-less architecture") - -In this deployment, only one Deployment is required, which is comprised of -a Pod with two containers, a Kong container which proxies the requests -and a controller container which configures Kong. - -`kong-proxy` service would point to the ports of the Kong container in the -above deployment. - -Since each pod contains a controller and a Kong container, scaling out -simply requires horizontally scaling this deployment to handle more traffic -or to add redundancy in the infrastructure. - -#### With a Database - -In a deployment where Kong is backed by a DB, the deployment architecture -is a little different. - -Please refer to the below figure: - -![Kong with a database](/assets/images/products/kubernetes-ingress-controller/db-deployment.png "Kong with database") - -In this type of deployment, there are two types of deployments created, -separating the control and data flow: - -- **Control-plane**: This deployment consists of a pod(s) running - the controller alongside - a Kong container, which can only configure the database. This deployment - does not proxy any traffic but only configures Kong. If multiple - replicas of this pod are running, a leader election process will ensure - that only one of the pods is configuring Kong's database at a time. 
-- **Data-plane**: This deployment consists of pods running a - single Kong container which can proxy traffic based on the configuration - it loads from the database. This deployment should be scaled to respond - to change in traffic profiles and add redundancy to safeguard from node - failures. -- **Database**: The database is used to store Kong's configuration and propagate - changes to all the Kong pods in the cluster. All Kong containers, in the - cluster should be able to connect to this database. - -A database driven deployment should be used if your use-case requires -dynamic creation of Consumers and/or credentials in Kong at a scale large -enough that the consumers will not fit entirely in memory. - -## Multiple Ingress Controllers - -It is possible to run multiple instances of the {{site.kic_product_name}} or -run a Kong {{site.kic_product_name}} alongside other Ingress Controllers inside -the same Kubernetes cluster. - -There are a few different ways of accomplishing this: - -- Using `kubernetes.io/ingress.class` annotation: - It is common to deploy Ingress Controllers on a cluster level, meaning - an Ingress Controller will satisfy Ingress rules created in all the namespaces - inside a Kubernetes cluster. - Use the annotation on Ingress and Custom resources to segment - the Ingress resources between multiple Ingress Controllers. - **Warning!** - When you use another Ingress Controller, which is default for cluster - (without set any `kubernetes.io/ingress.class`), be aware of using default `kong` - ingress class. There is special behavior of the default `kong` ingress class, - where any ingress resource that is not annotated is picked up. - Therefore with different ingress class then `kong`, you have to use that - ingress class with every Kong CRD object (plugin, consumer) which you use. -- Namespace based isolation: - {{site.kic_product_name}} supports a deployment option where it will satisfy - Ingress resources in a specific namespace. With this model, one can deploy - a controller in multiple namespaces and they will run in an isolated manner. -- If you are using {{site.ee_product_name}}, you can run multiple Ingress Controllers - pointing to the same database and configuring different Workspaces inside - {{site.ee_product_name}}. With such a deployment, one can use either of the above - two approaches to segment Ingress resources into different Workspaces in - {{site.ee_product_name}}. - -## Runtime - -The {{site.kic_product_name}} is compatible with a variety of runtimes: - -### {{site.base_gateway}} (OSS) - -This is the [Open-Source Gateway](https://github.com/kong/kong) runtime. -The Ingress Controller is primarily developed against releases of the -open-source gateway. - -### {{site.ee_product_name}} K8S - -If you are a {{site.ee_product_name}} customer you have access to Enterprise K8s in -addition to OSS. - -{{site.ee_product_name}} K8S is a package that takes the open-source {{site.base_gateway}} and -adds enterprise-only plugins to it. - -You simply need to deploy {{site.ee_product_name}} K8S instead of the Open-Source -Gateway in-order to take full-advantage of enterprise plugins. - -### {{site.ee_product_name}} - -The {{site.kic_product_name}} is also compatible with the full-blown version of -{{site.ee_product_name}}. This runtime ships with Kong Manager, Kong Portal, and a -number of other enterprise-only features. 
-[This doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise) provides a high-level -overview of the architecture. - -[k8s-namespace]: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ -[k8s-rbac]:https://kubernetes.io/docs/reference/access-authn-authz/rbac/ diff --git a/app/kubernetes-ingress-controller/2.0.x/concepts/design.md b/app/kubernetes-ingress-controller/2.0.x/concepts/design.md deleted file mode 100644 index 208b94bb2a89..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/concepts/design.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Kong Ingress Controller Design ---- - -## Overview - -The {{site.kic_product_name}} configures Kong -using Ingress resources created inside a Kubernetes cluster. - -The {{site.kic_product_name}} is made up of two high level components: - -- Kong, the core proxy that handles all the traffic -- Controller Manager, a series of processes that synchronize the configuration from Kubernetes to Kong - -The {{site.kic_product_name}} performs more than just proxying the traffic coming -into a Kubernetes cluster. It is possible to configure plugins, -load balancing, health checking and leverage all that Kong offers in a -standalone installation. - -The following figure shows how it works: - -![high-level-design](/assets/images/products/kubernetes-ingress-controller/high-level-design.png "High Level Design") - -The Controller Manager listens for changes happening inside the Kubernetes -cluster and updates Kong in response to those changes to correctly -proxy all the traffic. - -Kong is updated dynamically to respond to changes around scaling, -configuration changes, failures that are happening inside a Kubernetes -cluster. - -## Translation - -Kubernetes resources are mapped to Kong resources to correctly -proxy all the traffic. - -The following figure describes the mapping between Kubernetes concepts -to Kong's configuration: - -![translating k8s to kong](/assets/images/products/kubernetes-ingress-controller/k8s-to-kong.png "Translating k8s resources to Kong") - -Let's go through how Kubernetes resources are being mapped to Kong's -configuration: - -- An [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) - resource in Kubernetes defines a set of rules for proxying - traffic. These rules corresponds to the concept of Route in Kong. -- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/) - inside Kubernetes is a way to abstract an application that is - running on a set of pods. - This maps to two objects in Kong: Service and Upstream. - The service object in Kong holds the information on the protocol - to use to talk to the upstream service and various other protocol - specific settings. The Upstream object defines load-balancing - and health-checking behavior. -- Pods associated with a Service in Kubernetes map as a Target belonging - to the Upstream (the upstream corresponding to the Kubernetes - Service) in Kong. Kong load balances across the Pods of your service. - This means that all requests flowing through Kong are not directed via - kube-proxy but directly to the pod. - -For more information on how Kong works with Routes, Services, and Upstreams, -please see the [Proxy](/gateway/latest/reference/proxy/) -and [Load balancing](/gateway/latest/reference/loadbalancing/) references. 
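As a rough illustration of this mapping (resource names are illustrative), a single Ingress rule pointing at a Kubernetes Service surfaces in Kong as one Route, plus a Service and an Upstream for the backend, with one Target per Pod:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echo
  annotations:
    kubernetes.io/ingress.class: kong
spec:
  rules:
  - http:
      paths:
      - path: /echo            # becomes the path match on a Kong Route
        pathType: Prefix
        backend:
          service:
            name: echo         # the Kubernetes Service maps to a Kong Service and Upstream
            port:
              number: 80
```

Each Pod backing the `echo` Service then appears as a Target on that Upstream, which is why requests flow from Kong directly to Pod IPs rather than through kube-proxy.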
diff --git a/app/kubernetes-ingress-controller/2.0.x/concepts/ha-and-scaling.md b/app/kubernetes-ingress-controller/2.0.x/concepts/ha-and-scaling.md
deleted file mode 100644
index 8b8df4cfbd44..000000000000
--- a/app/kubernetes-ingress-controller/2.0.x/concepts/ha-and-scaling.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-title: High-availability and Scaling
----
-
-## High availability
-
-The {{site.kic_product_name}} is designed to be reasonably easy to operate and
-to be highly available, meaning that when expected failures do occur, the
-Controller should be able to continue to function with the minimum possible
-service disruption.
-
-The {{site.kic_product_name}} is composed of two parts: Kong, which handles
-the requests, and the Controller, which configures Kong dynamically.
-
-Kong itself can be deployed in a highly available manner by deploying
-multiple instances (or pods). Kong nodes are stateless, meaning a Kong pod
-can be terminated and restarted at any point in time.
-
-The controller itself can be stateful or stateless, depending on whether a database
-is being used or not.
-
-If a database is not used, then the Controller and Kong are deployed as
-colocated containers in the same pod and each controller configures the Kong
-container that it is running with.
-
-For cases when a database is necessary, the Controllers can be deployed
-across multiple zones to provide redundancy. In such a case, a leader election
-process will elect one instance as a leader, which will manipulate Kong's
-configuration.
-
-### Leader election
-
-The {{site.kic_product_name}} performs a leader election when multiple
-instances of the controller are running to ensure that only a single Controller
-is actively pushing changes to Kong's database (when running in DB mode).
-If multiple controllers are making changes to the database, it is possible that
-the controllers overwrite each other's changes.
-If an instance of the controller fails, another instance that is a follower
-takes up the leadership and then continues syncing Kong's configuration from
-Kubernetes.
-
-For this reason, the Controller needs permission to create a ConfigMap.
-By default, the permission is given at the cluster level, but it can be narrowed
-down to a single namespace (using a Role and RoleBinding) for a stricter RBAC
-policy.
-
-It also needs permission to read and update this ConfigMap.
-This permission can be specific to the ConfigMap that is being used
-for leader-election purposes.
-The name of the ConfigMap is derived from the value of the `election-id` CLI flag
-(default: `ingress-controller-leader`) and the
-ingress class (default: `kong`) as `<election-id>-<ingress-class>`.
-For example, the default ConfigMap that is used for leader election will
-be "ingress-controller-leader-kong", and it will be present in the same
-namespace that the controller is deployed in.
-
-## Scaling
-
-Kong is designed to be horizontally scalable, meaning that as traffic increases,
-multiple instances of Kong can be deployed to handle the increase in load.
-
-The configuration is either pumped into Kong directly via the Ingress
-Controller or loaded via the database. Kong containers can be considered
-stateless as the configuration is either loaded from the database (and
-cached heavily in-memory) or loaded in-memory directly via a config file.
-
-One can use a `HorizontalPodAutoscaler` (HPA) based on metrics
-like CPU utilization, bandwidth usage, or total request count per second
-to dynamically scale the {{site.kic_product_name}} as the traffic profile changes.
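As a sketch of that autoscaling approach — assuming the proxy and controller run in a Deployment named `ingress-kong` in the `kong` namespace, as in the example manifests referenced elsewhere in these docs, and that the cluster serves the `autoscaling/v2` API (older clusters may need `autoscaling/v2beta2`) — a CPU-based HPA could look like the following; treat the names and thresholds as placeholders:

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: ingress-kong
  namespace: kong
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: ingress-kong        # adjust to the name of your Kong deployment
  minReplicas: 2              # keep at least two pods for redundancy
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70   # scale out when average CPU use crosses 70%
```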
diff --git a/app/kubernetes-ingress-controller/2.0.x/concepts/ingress-classes.md b/app/kubernetes-ingress-controller/2.0.x/concepts/ingress-classes.md deleted file mode 100644 index a233c20b1208..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/concepts/ingress-classes.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Kong Ingress Controller and Ingress Class ---- - -## Introduction - -The {{site.kic_product_name}} uses ingress classes to filter Kubernetes Ingress -objects and other resources before converting them into Kong configuration. -This allows it to coexist with other ingress controllers and/or other -deployments of the {{site.kic_product_name}} in the same cluster: a -{{site.kic_product_name}} will only process configuration marked for its use. - -## Configuring the controller ingress class - -The `--ingress-class` flag (or `CONTROLLER_INGRESS_CLASS` environment variable) -specifies the ingress class expected by the {{site.kic_product_name}}. By default, -it expects the `kong` class. - -## Loading resources by class - -The {{site.kic_product_name}} translates a variety of Kubernetes resources into -Kong configuration. Broadly speaking, we can separate these resources into two -categories: - -- Resources that the controller translates directly into Kong configuration. -- Resources referenced by some other resource, where the other resource is - directly translated into Kong configuration. - -For example, an Ingress is translated directly into a Kong route, and a -KongConsumer is translated directly into a -[Kong consumer](/gateway/api/admin-ee/latest/#/Consumers/list-consumer/). A Secret containing -an authentication plugin credential is _not_ translated directly: it is only -translated into Kong configuration if a KongConsumer resource references it. - -Because they create Kong configuration independent of any other resources, -directly-translated resources require an ingress class, and their class must -match the class configured for the controller. Referenced resources do not -require a class, but must be referenced by a directly translated resource -that matches the controller. - -### Adding class information to resources - -Most resources use a [kubernetes.io/ingress-class annotation][class-annotation] -to indicate their class. There are several exceptions: - -- v1 Ingress resources have a [dedicated `ingressClassName` field][ingress-class-name]. -- Knative Services [use the class specified][knative-class] by the - `ingress.class` key of the Knative installation's `config-network` ConfigMap. - You can optionally [override this on a per-Service basis][knative-override] - by adding a `networking.knative.dev/ingress.class` annotation to the Service. - -## When to use a custom class - -Using the default `kong` class is fine for simpler deployments, where only one -{{site.kic_product_name}} instance is running in a cluster. Changing the class is -typical when: - -- You install multiple Kong environments in one Kubernetes cluster to handle - different types of ingress traffic, e.g. when using separate Kong instances - to handle traffic on internal and external load balancers, or deploying - different types of non-production environments in a single test cluster. -- You install multiple controller instances alongside a single Kong cluster to - separate configuration into different Kong workspaces (using the - `--kong-workspace` flag) or to restrict which Kubernetes namespaces any one - controller instance has access to. 
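For the multi-instance scenarios above, one possible way to bring up a second controller with its own class — a sketch only, assuming the official `kong/kong` Helm chart and its `ingressController.ingressClass` value, with `kong-internal` and `internal` as placeholder names — is to set the class at install time:

```bash
# Install a second, independent Kong + controller that only watches
# resources marked with the "internal" ingress class.
helm repo add kong https://charts.konghq.com
helm repo update

helm install kong-internal kong/kong \
  --namespace kong-internal --create-namespace \
  --set ingressController.ingressClass=internal
```

Ingress and Kong custom resources intended for that instance would then specify `internal` (via `ingressClassName` or the class annotation) instead of `kong`.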
- -## Examples - -Typical configurations will include a mix of resources that have class -information and resources that are referenced by them. For example, consider -the following configuration for authenticating a request, using a KongConsumer, -credential Secret, Ingress, and KongPlugin (a Service is implied, but not -shown): - -```yaml -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: dyadya-styopa - annotations: - kubernetes.io/ingress.class: "kong" -username: styopa -credentials: -- styopa-key - ---- - -kind: Secret -apiVersion: v1 -stringData: - key: bylkogdatomoryakom - kongCredType: key-auth -metadata: - name: styopa-key - ---- - -kind: Ingress -apiVersion: networking.k8s.io/v1 -metadata: - name: ktonezhnaet - annotations: - konghq.com/plugins: "key-auth-example" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /vsemznakom - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 - ---- - -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: key-auth-example -plugin: key-auth -``` - -The KongConsumer and Ingress resources both have class annotations, as they are -resources that the controller uses as a basis for building Kong configuration. -The Secret and KongPlugin _do not_ have class annotations, as they are -referenced by other resources that do. - -[class-annotation]:/kubernetes-ingress-controller/{{page.release}}/references/annotations/#kubernetesioingressclass -[knative-class]:/kubernetes-ingress-controller/{{page.release}}/guides/using-kong-with-knative/#ingress-class -[knative-override]:https://knative.dev/docs/serving/services/ingress-class/ -[ingress-class-name]:https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation diff --git a/app/kubernetes-ingress-controller/2.0.x/concepts/ingress-versions.md b/app/kubernetes-ingress-controller/2.0.x/concepts/ingress-versions.md deleted file mode 100644 index ecfff217c419..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/concepts/ingress-versions.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: Ingress v1 and v1beta1 Differences ---- - -## Introduction - -Kubernetes 1.19 introduced a new `networking.k8s.io/v1` API for the [Ingress resource][kubernetes-ingress-doc]. -It standardizes common practices and clarifies implementation requirements that -were previously up to individual controller vendors. This document covers those -changes as they relate to {{site.kic_product_name}} and provides sample -equivalent `networking.k8s.io/v1beta1` and `networking.k8s.io/v1` resources for comparison. - -## Paths - -Both Ingress v1beta1 and v1 HTTP rules require a path, which represents a [URI -path][uri-rfc-paths]. Although v1beta1 had specified that paths were [POSIX -regular expressions][posix-regex] and enforced this, in practice most -controllers used other implementations that did not match the -specification. v1 seeks to reduce confusion by introducing several [path -types][path-types] and lifting restrictions on regular expression grammars used -by controllers. - -### networking.k8s.io/v1beta1 - -The controller passes paths directly to Kong and relies on its [path handling -logic][kong-paths]. The Kong proxy treats paths as a prefix unless they include -characters [not allowed in RFC 3986 paths][uri-rfc-paths], in which case the -proxy assumes they are a regular expression, and does not treat slashes as -special characters. 
For example, the prefix `/foo` can match any of the -following: - -``` -/foo -/foo/ -/foobar -/foo/bar -``` - -### networking.k8s.io/v1 - -Although v1 Ingresses provide path types with more clearly-defined logic, the -controller must still create Kong routes and work within the Kong proxy's -routing logic. As such, the controller translates Ingress rule paths to create -Kong routes that match one of the following specifications: `Exact`, `Prefix`, or `ImplementationSpecific`. - -#### Exact - -If `pathType` is `Exact`, the controller creates a Kong route with a regular -expression that matches the rule path only. For example, an exact rule for `/foo` in an -Ingress translates to a Kong route with a `/foo$` regular expression path. - -#### Prefix - -If `pathType` is `Prefix`, the controller creates a Kong route with two path -criteria. For example, `/foo` will create a route with a `/foo$` regular expression and -`/foo/` plain path. - -#### ImplementationSpecific - -The controller leaves `ImplementationSpecific` path rules entirely up to the Kong -router. It creates a route with the exact same path string as the Ingress rule. - -{:.important} -> Both `Prefix` and `Exact` paths modify the paths you - provide, and those modifications may interfere with user-provided regular - expressions. If you are using your own regular expressions in paths, use - `ImplementationSpecific` to avoid unexpected behavior. - -## Ingress class - -[Ingress class][ingress-class] indicates which resources an ingress controller -should process. It provides a means to separate out configuration intended for -other controllers or other instances of the {{site.kic_product_name}}. - -In v1beta1, ingress class was handled informally using -`kubernetes.io/ingress.class` [annotations][deprecated-annotation]. v1 -introduces a new [IngressClass resource][ingress-class-api] which provides -richer information about the controller. v1 Ingresses are bound to a class via -their `ingressClassName` field. - -For example, consider this v1beta1 Ingress: - -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: example-ingress - annotations: - kubernetes.io/ingress.class: "kong" -spec: - rules: - - host: example.com - http: - paths: - - path: /test - backend: - serviceName: echo - servicePort: 80 -``` - -Its ingress class annotation is set to `kong`, and ingress controllers set to -process `kong` class Ingresses will process it. - -In v1, the equivalent configuration declares a `kong` IngressClass resource -whose `metadata.name` field indicates the class name. The `ingressClassName` -value of the Ingress object must match the value of the `name` field in the -IngressClass metadata: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - name: kong -spec: - controller: ingress-controllers.konghq.com/kong ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: example-ingress -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /testpath - pathType: Prefix - backend: - service: - name: test - port: - number: 80 -``` - -## Hostnames - -Ingress v1 formally codifies support for [wildcard hostnames][wildcard-hostnames]. -v1beta1 Ingresses did not reject wildcard hostnames, however, and Kong had -[existing support for them][kong-wildcard-hostnames]. - -As such, while the v1beta1 specification did not officially support wildcard -hostnames, you can use wildcard hostnames with either version. 
Setting a -hostname like `*.example.com` will match requests for both `foo.example.com` -and `bar.example.com` with either v1 or v1beta1 Ingresses. - -## Backend types - -Ingress v1 introduces support for backends other than Kubernetes Services through -[resource backends][resource-backends]. - -Kong does not support any dedicated resource backend configurations, though it -does have support for Routes without Services in some cases (for example, when -using the [AWS Lambda plugin][lambda-plugin]). For these routes, you should -create a placeholder Kubernetes Service for them, using an [ExternalName -Service][external-name] with an [RFC 2606][rfc-2606] invalid hostname, e.g. -`kong.invalid`. You can use these placeholder services with either v1 or -v1beta1 Ingresses. - -[kubernetes-ingress-doc]: https://kubernetes.io/docs/concepts/services-networking/ingress/ -[ingress-class]: /kubernetes-ingress-controller/{{page.release}}/concepts/ingress-classes -[uri-rfc-paths]: https://tools.ietf.org/html/rfc3986#section-3.3 -[posix-regex]: https://www.boost.org/doc/libs/1_38_0/libs/regex/doc/html/boost_regex/syntax/basic_extended.html -[path-types]: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types -[kong-paths]: /gateway/latest/reference/proxy/#request-path -[wildcard-hostnames]: https://kubernetes.io/docs/concepts/services-networking/ingress/#hostname-wildcards -[kong-wildcard-hostnames]: /gateway/latest/reference/proxy/#using-wildcard-hostnames -[resource-backends]: https://kubernetes.io/docs/concepts/services-networking/ingress/#resource-backend -[lambda-plugin]: /hub/kong-inc/aws-lambda/ -[external-name]: https://kubernetes.io/docs/concepts/services-networking/service/#externalname -[deprecated-annotation]: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation -[ingress-class-api]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-class-v1/ -[rfc-2606]:https://datatracker.ietf.org/doc/html/rfc2606 diff --git a/app/kubernetes-ingress-controller/2.0.x/concepts/k4k8s-with-kong-enterprise.md b/app/kubernetes-ingress-controller/2.0.x/concepts/k4k8s-with-kong-enterprise.md deleted file mode 100644 index 4331901f398f..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/concepts/k4k8s-with-kong-enterprise.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -Kong for Kubernetes is a {{site.kic_product_name}} built on top -of Open-Source {{site.base_gateway}}. - -If you are an Enterprise customer, you have an option of running the -[Enterprise version](/gateway/latest/install-and-run/kubernetes/) -of the Ingress Controller, which includes -all the Enterprise plugins but does not include Kong Manager or any -other Enterprise features. This makes it possible to -run the Ingress layer without a database, providing a very low -operational and maintenance footprint. - -However, in some cases, those enterprise features are necessary, -and for such use-cases we support another deployment - Kong for -Kubernetes with {{site.ee_product_name}}. - -As seen in the diagram below, this deployment consists of -Kong for Kubernetes deployed in Kubernetes, and is hooked up with -a database. If there are services running outside Kubernetes, -a regular {{site.base_gateway}} proxy can be deployed there and connected to the -same database. This provides a single pane of visibility of -all services that are running in your infrastructure. 
- -![architecture-overview](/assets/images/products/kubernetes-ingress-controller/k4k8s-with-kong-enterprise.png "K4K8S with {{site.ee_product_name}}") - -In this deployment model, the database for Kong can be hosted anywhere. -It can be a managed DBaaS service like Amazon RDS, Google Cloud -SQL or a Postgres instance managed in-house or even an instance -deployed on Kubernetes. -If you are following this model, please keep in mind the following: -- It is recommended to not deploy Postgres on Kubernetes, - due to the fact that running stateful applications on Kubernetes - is challenging to get right. -- Ensure that you have the same image/package of {{site.ee_product_name}} - running across the fleet. This means that all Kong instances that are - connected to the same database must use the - same version of {{site.ee_product_name}} package. - -[This guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise) -walks through the setup of the above architecture. diff --git a/app/kubernetes-ingress-controller/2.0.x/concepts/security.md b/app/kubernetes-ingress-controller/2.0.x/concepts/security.md deleted file mode 100644 index bbb08cd9073b..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/concepts/security.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Security ---- - -This document explains the security aspects of the {{site.kic_product_name}}. - -The {{site.kic_product_name}} communicates with Kubernetes API-server and Kong's -Admin API. APIs on both sides offer authentication/authorization features -and the controller integrates with them gracefully. - -## Kubernetes RBAC - -The {{site.kic_product_name}} is deployed with RBAC permissions as explained in the -[deployment](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment) document. -It has read and list permissions on most resources but requires update -and create permission for a few resources to provide seamless integration. -The permissions can be locked down further if needed depending on the specific -use-case. -This RBAC policy is associated with a ServiceAccount and the ServiceAccount -is associated with the {{site.kic_product_name}}. -The Controller uses the ServiceAccount credential to authenticate and -authorize itself against the Kubernetes API-server. - -## Kong Admin API Protection - -Kong's Admin API is used to control configuration of Kong and proxying behavior. -If an attacker happens to gain access to Kong's Admin API, they -will be able to perform all actions as an authorized user like -modifying or deleting Kong's configuration. -Hence, it is important that the deployment -ensures that the likelihood of this happening is as small as possible. - -In the example deployments, the Controller and Kong's Admin API communicate -over the loopback (`lo`) interface of the pod. -Kong is not performing any kind of authorization or -authentication on the Admin API, hence the API is accessible only -on the loopback interface to limit the attack surface. -Although not ideal, this setup requires fewer steps -to get started and can be further hardened as required. - -Please note that it is very important that Kong's Admin API is not accessible -inside the cluster as any malicious service can change Kong's configuration. -If you're exposing Kong's Admin API itself outside the cluster, please ensure -that you have the necessary authentication in place first. 
- -### Authentication on Kong's Admin API - -If Kong's Admin API is protected with one of the authentication plugins, -the Controller can authenticate itself against it to add another layer of -security. -The Controller comes with support for injecting arbitrary HTTP headers -in the requests it makes to Kong's Admin API, which can be used to inject -authentication credentials. -The headers can be specified using the CLI flag `--kong-admin-header` in the Ingress -Controller. - -The Ingress Controller will support mutual-TLS-based authentication on Kong's Admin -API in future. - -### {{site.ee_product_name}} RBAC - -{{site.ee_product_name}} comes with support for authentication and authorization on -Kong's Admin API. - -Once an RBAC token is provisioned, the {{site.kic_product_name}} can use the RBAC -token to authenticate against {{site.ee_product_name}}. Use the `--kong-admin-header` CLI -flag to pass the RBAC token the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/admission-webhook.md b/app/kubernetes-ingress-controller/2.0.x/deployment/admission-webhook.md deleted file mode 100644 index 167203c8faab..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/admission-webhook.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Validating Admission Controller ---- - -The {{site.kic_product_name}} ships with an Admission Controller for KongPlugin -and KongConsumer resources in the `configuration.konghq.com` API group. - -The Admission Controller needs a TLS certificate and key pair which -you need to generate as part of the deployment. - -Following guide walks through a setup of how to create the required key-pair -and enable the admission controller. - -Please note that this requires {{site.kic_product_name}} >= 0.6 to be -already installed in the cluster. - -## tldr; - -If you are using the stock YAML manifests to install and setup Kong for -Kubernetes, then you can setup the admission webhook using a single command: - -```bash -curl -sL https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/hack/deploy-admission-controller.sh | bash - -``` - -This script takes all the following commands and packs them together. -You need `kubectl` and `openssl` installed on your workstation for this to -work. - -## Create a certificate for the admission controller - -Kuberentes API-server makes an HTTPS call to the Admission Controller to verify -if the custom resource is valid or not. For this to work, Kubernetes API-server -needs to trust the CA certificate that is used to sign Admission Controller's -TLS certificate. - -This can be accomplished either using a self-signed certificate or using -Kubernetes CA. Follow one of the steps below and then go to -[Create the secret](#create-the-secret) step below. - -Please note the `CN` field of the x509 certificate takes the form -`..svc`, which -in the default case is `kong-validation-webhook.kong.svc`. 
-
-### Using self-signed certificate
-
-Use openssl to generate a self-signed certificate:
-
-```bash
-$ openssl req -x509 -newkey rsa:2048 -keyout tls.key -out tls.crt -days 365 \
-  -nodes -subj "/CN=kong-validation-webhook.kong.svc" \
-  -extensions EXT -config <( \
-   printf "[dn]\nCN=kong-validation-webhook.kong.svc\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:kong-validation-webhook.kong.svc\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth")
-Generating a 2048 bit RSA private key
-..........................................................+++
-.............+++
-writing new private key to 'key.pem'
-```
-
-### Using in-built Kubernetes CA
-
-Kubernetes comes with an in-built CA which can be used to provision
-a certificate for the Admission Controller.
-Please refer to
-[this guide](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/)
-on how to generate a certificate using the in-built CA.
-
-### Create the secret
-
-Next, create a Kubernetes secret object based on the key and certificate that
-were generated in the previous steps.
-Here, we assume that the PEM-encoded certificate is stored in a file named
-`tls.crt` and the private key is stored in `tls.key`.
-
-```bash
-$ kubectl create secret tls kong-validation-webhook -n kong \
-  --key tls.key --cert tls.crt
-secret/kong-validation-webhook created
-```
-
-## Update the deployment
-
-Once the secret is created, update the Ingress Controller deployment.
-
-Execute the following command to patch the {{site.kic_product_name}} deployment
-so that it mounts the certificate and key pair and enables the admission controller:
-
-```bash
-$ kubectl patch deploy -n kong ingress-kong \
-  -p '{"spec":{"template":{"spec":{"containers":[{"name":"ingress-controller","env":[{"name":"CONTROLLER_ADMISSION_WEBHOOK_LISTEN","value":":8080"}],"volumeMounts":[{"name":"validation-webhook","mountPath":"/admission-webhook"}]}],"volumes":[{"secret":{"secretName":"kong-validation-webhook"},"name":"validation-webhook"}]}}}}'
-deployment.extensions/ingress-kong patched
-```
-
-## Enable the validating admission
-
-If you are using the Kubernetes CA to generate the certificate, you don't need
-to supply a CA certificate (in the `caBundle` param)
-as part of the Validation Webhook configuration,
-as the API-server already trusts the internal CA.
- -```bash -$ echo "apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: kong-validations -webhooks: -- name: validations.kong.konghq.com - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: ["v1", "v1beta1"] - rules: - - apiGroups: - - configuration.konghq.com - apiVersions: - - '*' - operations: - - CREATE - - UPDATE - resources: - - kongconsumers - - kongplugins - - apiGroups: - - '' - apiVersions: - - 'v1' - operations: - - CREATE - - UPDATE - resources: - - secrets - clientConfig: - service: - namespace: kong - name: kong-validation-webhook - caBundle: $(cat tls.crt | base64 -w 0) " | kubectl apply -f - -``` - -## Verify if it works - -### Verify duplicate KongConsumers - -Create a KongConsumer with username as `harry`: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -kongconsumer.configuration.konghq.com/harry created -``` - -Now, create another KongConsumer with the same username: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: harry2 - annotations: - kubernetes.io/ingress.class: kong -username: harry" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: consumer already exists -``` - -The validation webhook rejected the KongConsumer resource as there already -exists a consumer in Kong with the same username. - -### Verify incorrect KongPlugins - -Try to create the folowing KongPlugin resource. -The `foo` config property does not exist in the configuration definition and -hence the Admission Controller returns back an error. -If you remove the `foo: bar` configuration line, the plugin will be -created succesfully. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: request-id -config: - foo: bar - header_name: my-request-id -plugin: correlation-id -" | kubectl apply -f - -Error from server: error when creating "STDIN": admission webhook "validations.kong.konghq.com" denied the request: 400 Bad Request {"fields":{"config":{"foo":"unknown field"}},"name":"schema violation","code":2,"message":"schema violation (config.foo: unknown field)"} -``` - -### Verify incorrect credential secrets - -With 0.7 and above versions of the controller, validations also take place -for incorrect secret types and wrong parameters to the secrets: - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=basic-auth \ - --from-literal=username=foo -Error from server: admission webhook "validations.kong.konghq.com" denied the request: missing required field(s): password -``` - -```bash -$ kubectl create secret generic some-credential \ - --from-literal=kongCredType=wrong-auth \ - --from-literal=sdfkey=my-sooper-secret-key -Error from server: admission webhook "validations.kong.konghq.com" denied the request: invalid credential type: wrong-auth -``` diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/aks.md b/app/kubernetes-ingress-controller/2.0.x/deployment/aks.md deleted file mode 100644 index df60e4014709..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/aks.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kong Ingress on Azure Kubernetes Service (AKS) ---- - -## Requirements - -1. A fully functional AKS cluster. 
- Please follow Azure's Guide to - [set up an AKS cluster](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the AKS Kubernetes - cluster you'll work on. The above AKS setup guide will help - you set this up. - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It will take a few minutes for all containers to start and report -healthy status. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Set up environment variables - -Next, set up an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's set up an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Microsoft Azure to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/eks.md b/app/kubernetes-ingress-controller/2.0.x/deployment/eks.md deleted file mode 100644 index 209a750f4edf..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/eks.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Kong Ingress on Elastic Kubernetes Service (EKS) ---- - -## Requirements - -1. A fully functional EKS cluster. - Please follow Amazon's Guide to - [set up an EKS cluster](https://aws.amazon.com/getting-started/projects/deploy-kubernetes-app-amazon-eks/). -2. Basic understanding of Kubernetes -3. A working `kubectl` linked to the EKS Kubernetes - cluster we will work on. The above EKS setup guide will help - you set this up. 
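For the last requirement above — a working `kubectl` linked to the EKS cluster — one common approach (assuming the AWS CLI is installed and configured; the region and cluster name below are placeholders) is:

```bash
# Write or update a kubeconfig entry for the EKS cluster and make it the current context.
aws eks update-kubeconfig --region us-west-2 --name my-eks-cluster

# Verify that kubectl can reach the cluster.
kubectl get nodes
```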
- -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -It may take a few minutes for all containers to start and report -healthy statuses. - -Alternatively, you can use our helm chart as well. -Please ensure that you have Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, create an environment variable with the IP address at which -Kong is accessible. This IP address sends requests to the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 example.eu-west-1.elb.amazonaws.com 80:31929/TCP,443:31408/TCP 57d -``` - -Create an environment variable to hold the ELB hostname: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].hostname}" service -n kong kong-proxy) -``` - -> Note: It may take some time for Amazon to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## TLS configuration - -Versions of Kong prior to 2.0.0 default to using [the "modern" cipher suite -list](https://wiki.mozilla.org/Security/Server_Side_TLS). This is not -compatible with ELBs when the ELB terminates TLS at the edge and establishes a -new session with Kong. This error will appear in Kong's logs: - -``` -*7961 SSL_do_handshake() failed (SSL: error:1417A0C1:SSL routines:tls_post_process_client_hello:no shared cipher) while SSL handshaking -``` - -To correct this issue, set `KONG_SSL_CIPHER_SUITE=intermediate` in your -environment variables. diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/gke.md b/app/kubernetes-ingress-controller/2.0.x/deployment/gke.md deleted file mode 100644 index 84603abc5744..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/gke.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Kong Ingress on Google Kubernetes Engine (GKE) ---- - -## Requirements - -1. A fully functional GKE cluster. 
- The easiest way to do this is to do it via the web UI: - Go to Google Cloud's console > Kubernetes Engine > Cluster > - Create a new cluster. - This documentation has been tested on a zonal cluster in - europe-west-4a using 1.10.5-gke.4 as Master version. - The default pool has been assigned 2 nodes of kind 1VCPU - with 3.75GB memory (default setting). - The OS used is COS (Container Optimized OS) and the auto-scaling - has been enabled. Default settings are being used except for - `HTTP load balancing` which has been disabled (you probably want to use - Kong features for this). For more information on GKE clusters, - refer to - [the GKE documentation](https://cloud.google.com/kubernetes-engine/docs/). -1. If you wish to use a static IP for Kong, you have to reserve a static IP - address (in Google Cloud's console > VPC network > - External IP addresses). For information, - you must create a regional IP - global is not supported as `loadBalancerIP` yet) -1. Basic understanding of Kubernetes -1. A working `kubectl` linked to the GKE Kubernetes - cluster we will work on. For information, you can associate a new `kubectl` - context by using: - - ```bash - gcloud container clusters get-credentials --zone --project - ``` - -## Update User Permissions - -> Because of [the way Kubernetes Engine checks permissions -when you create a Role or ClusterRole](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control), you must -first create a RoleBinding that grants you all of -the permissions included in the role you want to create. -An example workaround is to create a RoleBinding that -gives your Google identity a cluster-admin role -before attempting to create additional Role or -ClusterRole permissions. -This is a known issue in RBAC in Kubernetes and -Kubernetes Engine versions 1.6 and -later. - -A fast workaround: - -```yaml - -echo -n " -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cluster-admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: User - name: # usually the Google account - # e.g.: example@testorg.com - namespace: kube-system" | kubectl apply -f - - -``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. 
-Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -*Note:* this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -Execute the following command to get the IP address at which Kong is accessible: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 203.0.113.42 80:31929/TCP,443:31408/TCP 57d -``` - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for Google to actually associate the -IP address to the `kong-proxy` Service. - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/k4k8s-enterprise.md b/app/kubernetes-ingress-controller/2.0.x/deployment/k4k8s-enterprise.md deleted file mode 100644 index 0e5dfae63cb9..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/k4k8s-enterprise.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Kong for Kubernetes Enterprise ---- - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. - -## Prerequisites - -Before we can deploy Kong, we need to satisfy one prerequisite: create a license -secret. - -To create this secret, provision the `kong` namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -Enterprise version requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -If you do not have one, please contact your sales representative. -Save the license file temporarily to disk with filename `license` -and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -## Installers - -Once the secret is in-place, we can proceed with installation. - -Kong for Kubernetes can be installed using an installer of -your choice: - -### YAML manifests - -Execute the following to install Kong for Kubernetes Enterprise using YAML -manifests: - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless-k4k8s-enterprise.yaml -``` - -It takes a few minutes the first time this setup is done. 
- -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-6ffcf8c447-5qv6z 2/2 Running 1 44m -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get service kong-proxy -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.254.78 35.233.198.16 80:32697/TCP,443:32365/TCP 22h -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP address assigned to the service. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. - -Let's setup an environment variable to hold the IP address: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -> Note: It may take a while for your cloud provider to actually associate the -IP address to the `kong-proxy` Service. - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes Enterprise: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/deploy/manifests/enterprise-k8s -``` - -You can use the above URL as a base kustomization and build on top of it -as well. - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name - --namespace kong \ - -f values.yaml \ - --set ingressController.installCRDs=false -``` - -### Example values.yaml -``` -image: - repository: kong/kong-gateway - tag: 2.2.1.0-alpine -env: - LICENSE_DATA: - valueFrom: - secretKeyRef: - name: kong-enterprise-license - key: license -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes Enterprise - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/k4k8s.md b/app/kubernetes-ingress-controller/2.0.x/deployment/k4k8s.md deleted file mode 100644 index 288703cd434b..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/k4k8s.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kong for Kubernetes ---- - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -## Installers - -Kong for Kubernetes can be installed using an installer of -your choice. 
- -Once you've installed Kong for Kubernetes, -jump to the [next section](#using-kong-for-kubernetes) -on using it. - -### YAML manifests - -Please pick one of the following guides depending on your platform: - -- [Minikube](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/) -- [Google Kubernetes Engine(GKE) by Google](/kubernetes-ingress-controller/{{page.release}}/deployment/gke/) -- [Elastic Kubernetes Service(EKS) by Amazon](/kubernetes-ingress-controller/{{page.release}}/deployment/eks/) -- [Azure Kubernetes Service(AKS) by Microsoft](/kubernetes-ingress-controller/{{page.release}}/deployment/aks/) - -### Kustomize - -{:.important} -> Kustomize manifests are provided for illustration purposes only and are not officially supported by Kong. -There is no guarantee of backwards compatibility or upgrade capabilities for our Kustomize manifests. -For a production setup with Kong support, use the [Helm chart](https://github.com/kong/charts). - -Use Kustomize to install Kong for Kubernetes: - -``` -kustomize build github.com/kong/kubernetes-ingress-controller/deploy/manifests/base -``` - -You can use the above URL as a base kustomization and build on top of it -to make it suite better for your cluster and use-case. - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -### Helm - -You can use Helm to install Kong via the official Helm chart: - -``` -$ helm repo add kong https://charts.konghq.com -$ helm repo update - - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -Once installed, set an environment variable, $PROXY_IP with the External IP address of -the `demo-kong-proxy` service in `kong` namespace: - -``` -export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong demo-kong-proxy) -``` - -## Using Kong for Kubernetes - -Once you've installed Kong for Kubernetes, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/kong-enterprise.md b/app/kubernetes-ingress-controller/2.0.x/deployment/kong-enterprise.md deleted file mode 100644 index e2f7648a283c..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/kong-enterprise.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Kong for Kubernetes with {{site.ee_product_name}} ---- - -This guide walks through setting up the {{site.kic_product_name}} using Kong -Enterprise. This architecture is described in detail in [this doc](/kubernetes-ingress-controller/{{page.release}}/concepts/k4k8s-with-kong-enterprise/). - -We assume that we start from scratch and you don't have {{site.ee_product_name}} -deployed. For the sake of simplicity, we will deploy {{site.ee_product_name}} and -its database in Kubernetes itself. You can safely run them outside -Kubernetes as well. 
- -## Prerequisites - -Before we can deploy the {{site.kic_product_name}} with {{site.ee_product_name}}, -we need to satisfy the following prerequisites: -- [{{site.ee_product_name}} License secret](#kong-enterprise-license-secret) -- [{{site.ee_product_name}} bootstrap password](#kong-enterprise-bootstrap-password) - -In order to create these secrets, let's provision the `kong` -namespace first: - -```bash -$ kubectl create namespace kong -namespace/kong created -``` - -### {{site.ee_product_name}} License secret - -{{site.ee_product_name}} requires a valid license to run. -As part of sign up for {{site.ee_product_name}}, you should have received a license file. -Save the license file temporarily to disk and execute the following: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -n kong -secret/kong-enterprise-license created -``` - -Please note that `-n kong` specifies the namespace in which you are deploying - the {{site.kic_product_name}}. If you are deploying in a different namespace, - please change this value. - -### {{site.ee_product_name}} bootstrap password - -Next, we need to create a secret containing the password using which we can login into Kong Manager. -Please replace `cloudnative` with a random password of your choice and note it down. - -```bash -$ kubectl create secret generic kong-enterprise-superuser-password -n kong --from-literal=password=cloudnative -secret/kong-enterprise-superuser-password created -``` - -Once these are created, we are ready to deploy {{site.ee_product_name}} -Ingress Controller. - -## Install - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/single/all-in-one-postgres-enterprise.yaml -``` - -It takes a little while to bootstrap the database. -Once bootstrapped, you should see the {{site.kic_product_name}} running with -{{site.ee_product_name}} as its core: - -```bash -$ kubectl get pods -n kong -NAME READY STATUS RESTARTS AGE -ingress-kong-548b9cff98-n44zj 2/2 Running 0 21s -kong-migrations-pzrzz 0/1 Completed 0 4m3s -postgres-0 1/1 Running 0 4m3s -``` - -You can also see the `kong-proxy` service: - -```bash -$ kubectl get services -n kong -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-admin LoadBalancer 10.63.255.85 34.83.95.105 80:30574/TCP 4m35s -kong-manager LoadBalancer 10.63.247.16 34.83.242.237 80:31045/TCP 4m34s -kong-proxy LoadBalancer 10.63.242.31 35.230.122.13 80:32006/TCP,443:32007/TCP 4m34s -kong-validation-webhook ClusterIP 10.63.240.154 443/TCP 4m34s -postgres ClusterIP 10.63.241.104 5432/TCP 4m34s - -``` - -> Note: Depending on the Kubernetes distribution you are using, you might or might -not see an external IP assigned to the three LoadBalancer type services. Please see -your provider's guide on obtaining an IP address for a Kubernetes Service of -type `LoadBalancer`. If you are running Minikube, you will not get an -external IP address. - -### Setup Kong Manager - -Next, if you browse to the IP address or host of the `kong-manager` service in your Browser, -which in our case is `http://34.83.242.237`. -Kong Manager should load in your browser. -Try logging in to the Manager with the username `kong_admin` -and the password you supplied in the prerequisite, it should fail. -The reason being we've not yet told Kong Manager where it can find the Admin API. - -Let's set that up. 
We will take the External IP address of `kong-admin` service and -set the environment variable `KONG_ADMIN_API_URI`: - -```bash -KONG_ADMIN_IP=$(kubectl get svc -n kong kong-admin --output=jsonpath='{.status.loadBalancer.ingress[0].ip}') -kubectl patch deployment -n kong ingress-kong -p "{\"spec\": { \"template\" : { \"spec\" : {\"containers\":[{\"name\":\"proxy\",\"env\": [{ \"name\" : \"KONG_ADMIN_API_URI\", \"value\": \"${KONG_ADMIN_IP}\" }]}]}}}}" -``` - -It will take a few minutes to roll out the updated deployment and once the new -`ingress-kong` pod is up and running, you should be able to log into the Kong Manager UI. - -As you follow along with other guides on how to use your newly deployed the {{site.kic_product_name}}, -you will be able to browse Kong Manager and see changes reflected in the UI as Kong's -configuration changes. - -## Using Kong for Kubernetes with {{site.ee_product_name}} - -Let's setup an environment variable to hold the IP address of `kong-proxy` service: - -```bash -$ export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy) -``` - -Once you've installed Kong for Kubernetes Enterprise, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn more. - -## Customizing by use-case - -The deployment in this guide is a point to start using Ingress Controller. -Based on your existing architecture, this deployment will require custom -work to make sure that it needs all of your requirements. - -In this guide, there are three load-balancers deployed for each of -Kong Proxy, Kong Admin and Kong Manager services. It is possible and -recommended to instead have a single Load balancer and then use DNS names -and Ingress resources to expose the Admin and Manager services outside -the cluster. diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/minikube.md b/app/kubernetes-ingress-controller/2.0.x/deployment/minikube.md deleted file mode 100644 index d56a366bb0bd..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/minikube.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Kong Ingress on Minikube ---- - -## Setup Minikube - -1. Install [`minikube`](https://github.com/kubernetes/minikube) - - Minikube is a tool that makes it easy to run Kubernetes locally. - Minikube runs a single-node Kubernetes cluster inside a VM on your laptop - for users looking to try out Kubernetes or develop with it day-to-day. - -1. Start `minikube` - - ```bash - minikube start - ``` - - It will take a few minutes to get all resources provisioned. 
- - ```bash - kubectl get nodes - ``` - -## Deploy the {{site.kic_product_name}} {#deploy-kic} - -Deploy the {{site.kic_product_name}} using `kubectl`: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -Alternatively, you can use our helm chart as well. -Please ensure that you've Tiller working and then execute: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -# Helm 3 -$ helm install kong/kong --generate-name --set ingressController.installCRDs=false -``` - -> Note: this process could take up to five minutes the first time. - -## Setup environment variables - -Next, we will setup an environment variable with the IP address at which -Kong is accessible. This will be used to actually send requests into the -Kubernetes cluster. - -```bash -$ export PROXY_IP=$(minikube service -n kong kong-proxy --url | head -1) -# If installed by helm, service name would be "-kong-proxy". -# $ export PROXY_IP=$(minikube service -kong-proxy --url | head -1) -$ echo $PROXY_IP -http://192.168.99.100:32728 -``` - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. diff --git a/app/kubernetes-ingress-controller/2.0.x/deployment/overview.md b/app/kubernetes-ingress-controller/2.0.x/deployment/overview.md deleted file mode 100644 index 5a37579b5a99..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/deployment/overview.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Installing and Configuring ---- - -## Getting started - -If you are getting started with Kong for Kubernetes, -install it on Minikube using our Minikube [setup guide](/kubernetes-ingress-controller/{{page.release}}/deployment/minikube/). - -Once you've installed the {{site.kic_product_name}}, please follow our -[getting started](/kubernetes-ingress-controller/{{page.release}}/guides/getting-started) tutorial to learn -about how to use the Ingress Controller. - -## Overview - -The {{site.kic_product_name}} can be installed on a local, managed -or any Kubernetes cluster which supports a service of type `LoadBalancer`. - -As explained in the [deployment document](/kubernetes-ingress-controller/{{page.release}}/concepts/deployment/), there -are a variety of configurations and runtimes for the {{site.kic_product_name}}. - -The following sections detail on deployment steps for all the different -runtimes: - -## Kong for Kubernetes - - -Kong for Kubernetes is an Ingress Controller based on the -Open-Source {{site.base_gateway}}. 
It consists of two components: - -- **Kong**: the Open-Source Gateway -- **Controller**: a daemon process that integrates with the - Kubernetes platform and configures Kong. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s/) to deploy Kong for Kubernetes -using an installation method of your choice. - -## Kong for Kubernetes Enterprise - -Kong for Kubernetes Enterprise is an enhanced version of -the Open-Source Ingress Controller. It includes all -Enterprise plugins and comes with 24x7 support for worry-free -production deployment. -This is available to enterprise customers of Kong, Inc. only. - -Please follow [this guide](/kubernetes-ingress-controller/{{page.release}}/deployment/k4k8s-enterprise/) to deploy Kong for Kubernetes -Enterprise if you have purchased or are trying out {{site.ee_product_name}}. - -## Kong for Kubernetes with {{site.ee_product_name}} - -Kong for Kubernetes can integrate with {{site.ee_product_name}} to -provide a single pane of visibility across all of your services -that are running in Kubernetes and non-Kubernetes environments. - -This [guide](/kubernetes-ingress-controller/{{page.release}}/deployment/kong-enterprise/) goes into details of -the architecture and how one can set that up. - -## Admission Controller - -The {{site.kic_product_name}} also ships with a Validating -Admission Controller that -can be enabled to verify KongConsumer, KongPlugin and Secret -resources as they are created. -Please follow the [admission-webhook](/kubernetes-ingress-controller/{{page.release}}/deployment/admission-webhook/) deployment -guide to set it up. diff --git a/app/kubernetes-ingress-controller/2.0.x/examples/001_multiple-services.yaml b/app/kubernetes-ingress-controller/2.0.x/examples/001_multiple-services.yaml deleted file mode 100644 index 92b2d19bbdc3..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/examples/001_multiple-services.yaml +++ /dev/null @@ -1,88 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: http-svc -spec: - replicas: 1 - selector: - matchLabels: - app: http-svc - template: - metadata: - labels: - app: http-svc - spec: - containers: - - name: http-svc - image: docker.io/kennethreitz/httpbin - ports: - - containerPort: 80 - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - ---- - -apiVersion: v1 -kind: Service -metadata: - name: billing - labels: - app: billing -spec: - type: NodePort - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - app: http-svc ---- - -apiVersion: v1 -kind: Service -metadata: - name: invoice - labels: - app: invoice -spec: - type: NodePort - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - app: http-svc ---- - -apiVersion: v1 -kind: Service -metadata: - name: comments - labels: - app: comments -spec: - type: NodePort - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - app: http-svc diff --git a/app/kubernetes-ingress-controller/2.0.x/faq.md b/app/kubernetes-ingress-controller/2.0.x/faq.md deleted file mode 100644 index 8f401a8b6e3c..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/faq.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: FAQs ---- - -### Why endpoints and not services? 
- -The {{site.kic_product_name}} does not use -[Services][k8s-service] to route traffic -to the pods. Instead, it uses the Endpoints API -to bypass [kube-proxy][kube-proxy] -to allow Kong features like session affinity and -custom load balancing algorithms. -It also removes overhead -such as conntrack entries for iptables DNAT. - -### Is it possible to create consumers using the Admin API? - -From version 0.5.0 onwards, the {{site.kic_product_name}} tags each entity -that it manages inside Kong's database and only manages the entities that -it creates. -This means that if consumers and credentials are created dynamically, they -won't be deleted by the Ingress Controller. - -[k8s-service]: https://kubernetes.io/docs/concepts/services-networking/service -[kube-proxy]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy diff --git a/app/kubernetes-ingress-controller/2.0.x/guides/cert-manager.md b/app/kubernetes-ingress-controller/2.0.x/guides/cert-manager.md deleted file mode 100644 index 16e856f910c0..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/guides/cert-manager.md +++ /dev/null @@ -1,377 +0,0 @@ ---- -title: Using cert-manager for automated TLS certificate ---- - -This guide will walk through steps to set up the {{site.kic_product_name}} with -cert-manager to automate certificate management using Let's Encrypt. -Any ACME-based CA can be used in-place of Let's Encrypt as well. - -## Before you begin - -You will need the following: - -- Kubernetes cluster that can provision an IP address that is routable from - the Internet. If you don't have one, you can use GKE or any managed k8s - cloud offering. -- A domain name for which you control the DNS records. - This is necessary so that - Let's Encrypt can verify the ownership of the domain and issue a certificate. - In the current guide, we use `example.com`, please replace this with a domain - you control. - -This tutorial was written using Google Kubernetes Engine. - -## Set up the {{site.kic_product_name}} {#set-up-kic} - -Execute the following to install the Ingress Controller: - -```bash -$ kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml -namespace/kong created -customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.example.com created -customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.example.com created -serviceaccount/kong-serviceaccount created -clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created -configmap/kong-server-blocks created -service/kong-proxy created -service/kong-validation-webhook created -deployment.extensions/kong created -``` - -## Set up cert-manager - -Please follow cert-manager's [documentation](https://cert-manager.io/docs/installation/) -on how to install cert-manager onto your cluster. 
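For reference, a common way to install cert-manager is to apply its release manifest directly. This is only a sketch: the version shown is an example, so check the cert-manager releases page for the release you actually want.

```bash
# Install cert-manager (CRDs, controller, cainjector, and webhook).
# v1.5.4 is an example version only; pick a current release.
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.5.4/cert-manager.yaml
```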
- -Once installed, verify all the components are running using: - -```bash -kubectl get all -n cert-manager -NAME READY STATUS RESTARTS AGE -pod/cert-manager-86478c5ff-mkhb9 1/1 Running 0 23m -pod/cert-manager-cainjector-65dbccb8b6-6dnjl 1/1 Running 0 23m -pod/cert-manager-webhook-78f9d55fdf-5wcnp 1/1 Running 0 23m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/cert-manager-webhook ClusterIP 10.63.240.251 443/TCP 23m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/cert-manager 1 1 1 1 23m -deployment.apps/cert-manager-cainjector 1 1 1 1 23m -deployment.apps/cert-manager-webhook 1 1 1 1 23m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cert-manager-86478c5ff 1 1 1 23m -replicaset.apps/cert-manager-cainjector-65dbccb8b6 1 1 1 23m -replicaset.apps/cert-manager-webhook-78f9d55fdf 1 1 1 23m -``` - -## Set up your application - -Any HTTP-based application can be used, for the purpose of the demo, install -the following echo server: - -```bash -$ kubectl apply -f https://bit.ly/echo-service -service/echo created -deployment.apps/echo created -``` - -## Set up DNS - -Get the IP address of the load balancer for Kong: - -```bash -$ kubectl get service -n kong kong-proxy -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kong-proxy LoadBalancer 10.63.250.199 35.233.170.67 80:31929/TCP,443:31408/TCP 58d -``` - -To get only the IP address: - -```bash -$ kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy -35.233.170.67 -``` - -Please note that the IP address in your case will be different. - -Next, setup a DNS records to resolve `proxy.example.com` to the -above IP address: - -```bash -$ dig +short proxy.example.com -35.233.170.67 -``` - -Next, setup a CNAME DNS record to resolve `demo.example.com` to -`proxy.example.com`. - -```bash -$ dig +short demo.yolo2.com -proxy.example.com. -35.233.170.67 -``` - -## Expose your application to the Internet - -Setup an Ingress rule to expose the application: - -```bash -$ echo " -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo-example-com -spec: - ingressClassName: kong - rules: - - host: demo.example.com - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - service: - name: echo - port: - number: 80 -" | kubectl apply -f - -ingress.extensions/demo-example-com created -``` - -Access your application: - -```bash -$ curl -I demo.example.com -HTTP/1.1 200 OK -Content-Type: text/plain; charset=UTF-8 -Connection: keep-alive -Date: Fri, 21 Jun 2019 21:14:45 GMT -Server: echoserver -X-Kong-Upstream-Latency: 1 -X-Kong-Proxy-Latency: 1 -Via: kong/1.1.2 -``` - -## Request TLS Certificate from Let's Encrypt - -First, setup a ClusterIssuer for cert-manager - -```bash -$ echo "apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-prod - namespace: cert-manager -spec: - acme: - email: user@example.com #please change this - privateKeySecretRef: - name: letsencrypt-prod - server: https://acme-v02.api.letsencrypt.org/directory - solvers: - - http01: - ingress: - class: kong" | kubectl apply -f - -clusterissuer.cert-manager.io/letsencrypt-prod configured -``` - -*Note*: If you run into issues configuring this, -be sure that the group (`cert-manager.io`) and -version (`v1`) match those in the output of -`kubectl describe crd clusterissuer`. -This directs cert-manager which CA authority to use to issue the certificate. 
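Before requesting certificates, it can help to confirm that the ClusterIssuer has registered its ACME account and is ready; the `READY` column should show `True`:

```bash
# Check that cert-manager has marked the issuer as ready
kubectl get clusterissuer letsencrypt-prod -o wide
```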
- -Next, update your Ingress resource to provision a certificate and then use it: - -```bash -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo-example-com - annotations: - kubernetes.io/tls-acme: "true" - cert-manager.io/cluster-issuer: letsencrypt-prod -spec: - ingressClassName: kong - tls: - - secretName: demo-example-com - hosts: - - demo.example.com - rules: - - host: demo.example.com - http: - paths: - - path: / - pathType: ImplementationSpecific - backend: - service: - name: echo - port: - number: 80 -' | kubectl apply -f - -ingress.extensions/demo-example-com configured -``` - -Things to note here: - -- The annotation `kubernetes.io/tls-acme` is set to `true`, informing - cert-manager that it should provision a certificate for hosts in this - Ingress using ACME protocol. -- `certmanager.k8s.io/cluster-issuer` is set to `letsencrypt-prod`, directing - cert-manager to use Let's Encrypt's production server to provision a TLS - certificate. -- `tls` section of the Ingress directs the {{site.kic_product_name}} to use the - secret `demo-example-com` to encrypt the traffic for `demo.example.com`. - This secret will be created by cert-manager. - -Once you update the Ingress resource, cert-manager will start provisioning -the certificate and in sometime the certificate will be available for use. - -You can track the progress of certificate issuance: - -```bash -$ kubectl describe certificate demo-example-com -Name: demo-example-com -Namespace: default -Labels: -Annotations: -API Version: certmanager.k8s.io/v1 -Kind: Certificate -Metadata: - Creation Timestamp: 2019-06-21T20:41:54Z - Generation: 1 - Owner References: - API Version: networking.k8s.io/v1 - Block Owner Deletion: true - Controller: true - Kind: Ingress - Name: demo-example-com - UID: 261d15d3-9464-11e9-9965-42010a8a01ad - Resource Version: 19561898 - Self Link: /apis/certmanager.k8s.io/v1/namespaces/default/certificates/demo-example-com - UID: 014d3f1d-9465-11e9-9965-42010a8a01ad -Spec: - Acme: - Config: - Domains: - demo.example.com - Http 01: - Dns Names: - demo.example.com - Issuer Ref: - Kind: ClusterIssuer - Name: letsencrypt-prod - Secret Name: demo-example-com -Status: - Conditions: - Last Transition Time: 2019-06-21T20:42:20Z - Message: Certificate is up to date and has not expired - Reason: Ready - Status: True - Type: Ready - Not After: 2019-09-19T19:42:19Z -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Generated 53m cert-manager Generated new private key - Normal GenerateSelfSigned 53m cert-manager Generated temporary self signed certificate - Normal OrderCreated 53m cert-manager Created Order resource "demo-example-com-3811625818" - Normal OrderComplete 53m cert-manager Order "demo-example-com-3811625818" completed successfully - Normal CertIssued 53m cert-manager Certificate issued successfully -``` - -## Test HTTPS - -Once all is in place, you can use HTTPS: - -```bash -$ curl -v https://demo.example.com -* Rebuilt URL to: https://demo.example.com/ -* Trying 35.233.170.67... 
-* TCP_NODELAY set -* Connected to demo.example.com (35.233.170.67) port 443 (#0) -* ALPN, offering h2 -* ALPN, offering http/1.1 -* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH -* successfully set certificate verify locations: -* CAfile: /etc/ssl/cert.pem - CApath: none -* TLSv1.2 (OUT), TLS handshake, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Server hello (2): -* TLSv1.2 (IN), TLS handshake, Certificate (11): -* TLSv1.2 (IN), TLS handshake, Server key exchange (12): -* TLSv1.2 (IN), TLS handshake, Server finished (14): -* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): -* TLSv1.2 (OUT), TLS change cipher, Client hello (1): -* TLSv1.2 (OUT), TLS handshake, Finished (20): -* TLSv1.2 (IN), TLS change cipher, Client hello (1): -* TLSv1.2 (IN), TLS handshake, Finished (20): -* SSL connection using TLSv1.2 / ECDHE-RSA-AES256-GCM-SHA384 -* ALPN, server accepted to use http/1.1 -* Server certificate: -* subject: CN=demo.example.com -* start date: Jun 21 19:42:19 2019 GMT -* expire date: Sep 19 19:42:19 2019 GMT -* subjectAltName: host "demo.example.com" matched cert's "demo.example.com" -* issuer: C=US; O=Let's Encrypt; CN=Let's Encrypt Authority X3 -* SSL certificate verify ok. -> GET / HTTP/1.1 -> Host: demo.example.com -> User-Agent: curl/7.54.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Content-Type: text/plain; charset=UTF-8 -< Transfer-Encoding: chunked -< Connection: keep-alive -< Date: Fri, 21 Jun 2019 21:37:43 GMT -< Server: echoserver -< X-Kong-Upstream-Latency: 1 -< X-Kong-Proxy-Latency: 1 -< Via: kong/1.1.2 -< - - -Hostname: echo-d778ffcd8-52ddj - -Pod Information: - node name: gke-harry-k8s-dev-default-pool-bb23a167-9w4t - pod name: echo-d778ffcd8-52ddj - pod namespace: default - pod IP:10.60.2.246 - -Server values: - server_version=nginx: 1.12.2 - lua: 10010 - -Request Information: - client_address=10.60.2.239 - method=GET - real path=/ - query= - request_version=1.1 - request_scheme=http - request_uri=http://demo.example.com:8080/ - -Request Headers: - accept=*/* - connection=keep-alive - host=demo.example.com - user-agent=curl/7.54.0 - x-forwarded-for=10.138.0.6 - x-forwarded-host=demo.example.com - x-forwarded-port=8443 - x-forwarded-proto=https - x-real-ip=10.138.0.6 - -Request Body: - -no body in request- -``` - -Et voilà ! You've secured your API with HTTPS -with the {{site.kic_product_name}} and cert-manager. diff --git a/app/kubernetes-ingress-controller/2.0.x/guides/configure-acl-plugin.md b/app/kubernetes-ingress-controller/2.0.x/guides/configure-acl-plugin.md deleted file mode 100644 index 483f13ce9214..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/guides/configure-acl-plugin.md +++ /dev/null @@ -1,773 +0,0 @@ ---- -title: Configuring ACL Plugin ---- - -This guide walks through configuring the Kong ACL Plugin. The ACL Plugin -requires the use of at least one Authentication plugin. This example will use -the JWT Auth Plugin - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. 
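As a sketch, if you deployed Kong with the all-in-one manifests so that the proxy is exposed through a `LoadBalancer` Service named `kong-proxy` in the `kong` namespace, the variable can be set like this:

```bash
# Export the external IP of the kong-proxy Service for use in later requests
export PROXY_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy)
echo $PROXY_IP
```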
- -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create two Ingress rules to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/strip-path: "false" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /get - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/strip-path: "false" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /post - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - -``` - -Test the Ingress rules: - -```bash -$ curl -i $PROXY_IP/get -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -``` - -## Add JWT authentication to the service - -With Kong, adding authentication in front of an API is as simple as -enabling a plugin. Let's enable JWT authentication - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: app-jwt -plugin: jwt -" | kubectl apply -f - -``` - -Now let's associate the plugin to the Ingress rules we created earlier. 
- -```bash -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /get - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt - konghq.com/strip-path: "false" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /post - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - -``` - -Any requests matching the proxying rules for `demo-get` and `demo` post will -now require a valid JWT and the consumer for the JWT to be associate with the -right ACL. - -```bash -$ curl -i $PROXY_IP/get - -HTTP/1.1 401 Unauthorized -Date: Mon, 06 Apr 2020 07:27:44 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 50 -X-Kong-Response-Latency: 2 -Server: kong/2.0.2 - - -{"message":"Unauthorized"} - -$ curl -i --data "foo=bar" -X POST $PROXY_IP/post - -HTTP/1.1 401 Unauthorized -Date: Mon, 06 Apr 2020 07:27:44 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 50 -X-Kong-Response-Latency: 2 -Server: kong/2.0.2 - - -{"message":"Unauthorized"} -``` - -You should get a 401 response telling you that the request is not authorized. - -## Provision Consumers - -Let's provision 2 KongConsumer resources: - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -" | kubectl apply -f - -``` - -## Secrets - -Next, let's provision some Secrets for the KongConsumers to reference. Each -ACL will need its own Secret and each JWT public key will need its own Secret. -The credential type is specified in the `kongCredType` field. In this -case we'll be using `jwt` and `acl`. You can create a secret using any other -method as well. - -The JWT signing algorithm is set in the `algorithm` field. The if using a -public key like this example it is stored in the `rsa_pulic_key` field. If you -are using a secret signing key, use the `secret` field. The last field to set -if you are using `RS256` or `ES256` is the `key` field. This should match the -`iss` field in the JWT you will be sending. You can check this value by -decoding your JWT over at [https://jwt.io](https://jwt.io) - -Since we are using the Secret resource, Kubernetes will encrypt and store the -JWT signing key and ACL group for us. - -### JWT signing key - -```bash -# create secret for jwt public key -$ kubectl create secret \ - generic app-admin-jwt \ - --from-literal=kongCredType=jwt \ - --from-literal=key="admin-issuer" \ - --from-literal=algorithm=RS256 \ - --from-literal=secret="dummy" \ - --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY----- - MIIBIjA.... 
- -----END PUBLIC KEY-----" - -# create a second secret with a different key -$ kubectl create secret \ - generic app-user-jwt \ - --from-literal=kongCredType=jwt \ - --from-literal=key="user-issuer" \ - --from-literal=algorithm=RS256 \ - --from-literal=secret="dummy" \ - --from-literal=rsa_public_key="-----BEGIN PUBLIC KEY----- - qwerlkjqer.... - -----END PUBLIC KEY-----" -``` - -Validation requirements impose that even if the `secret` is not used for algorithm -`RS256` or `ES256` the field `secret` must be present, so put some dummy value for it. - -## Assign the credentials - -In order to for the ACL and JWT to be validated by Kong, the secrets will need -to be referenced by the KongConsumers we created earlier. Let's update those. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt -" | kubectl apply -f - -``` - -## Use the credential - -Now to use a JWT to pass authentication. Let's store the user and admin jwt's -in some environment variables. `USER_JWT` and `ADMIN_JWT`. If you are using -an identity provider, you should be able to login and get out a JWT from their -API. If you are generating your own, go through the process of generating your -own. - -Let's test the get route - -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true 
-X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} - - -``` - -## Adding ACL's - -The JWT plugin doesn't provide the ability to authroize a given issuer to a -given ingress. To do this we need to use the ACL plugin. Let's create an admin -ACL config - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: admin-acl -plugin: acl -config: - whitelist: ['app-admin'] -" | kubectl apply -f - -``` - -Then let's create a user ACL config. We want our admin to be able to access -the same resources as the user, so let's make sure we include them in the -whitelist. - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongPlugin -metadata: - name: plain-user-acl -plugin: acl -config: - whitelist: ['app-user','app-admin'] -" | kubectl apply -f - -``` - -Next let's create the secrets that will define the ACL groups. - -```bash -# create secrets for acl groups -$ kubectl create secret \ - generic app-admin-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-admin - -$ kubectl create secret \ - generic app-user-acl \ - --from-literal=kongCredType=acl \ - --from-literal=group=app-user -``` - -After we create the secrets, the consumers need to be updated to reference the -ACL credentials - -```bash -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: admin - annotations: - kubernetes.io/ingress.class: kong -username: admin -credentials: - - app-admin-jwt - - app-admin-acl -" | kubectl apply -f - - -$ echo " -apiVersion: configuration.konghq.com/v1 -kind: KongConsumer -metadata: - name: plain-user - annotations: - kubernetes.io/ingress.class: kong -username: plain-user -credentials: - - app-user-jwt - - app-user-acl -" | kubectl apply -f - -``` - -The last thing to configure is the ingress to use the new plguins. Note, if you -set more than one ACL plugin, the last one supplied will be the only one -evaluated. 
- -```bash -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo-get - annotations: - konghq.com/plugins: app-jwt,plain-user-acl - konghq.com/strip-path: "false" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /get - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - - -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo-post - annotations: - konghq.com/plugins: app-jwt,admin-acl - konghq.com/strip-path: "false" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /post - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - -``` - -Now let's test it. - -```bash -$ curl -i -H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-user", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "plain-user", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - - - -$ curl -i -H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/get - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 947 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 06:45:45 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 7 -X-Kong-Proxy-Latency: 2 -Via: kong/2.0.2 - -{ - "args": {}, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "a6edc906-2f9f-5fb2-a373-efac406f0ef2", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "origin": "192.168.0.3", - "url": "http://some.url/get" -} - -``` - -Now let's test the post route - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${USER_JWT}" $PROXY_IP/post -HTTP/1.1 403 Forbidden -Date: Mon, 06 Apr 2020 07:11:59 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 45 -X-Kong-Response-Latency: 1 -Server: kong/2.0.2 - -{"message":"You cannot consume this service"} -``` - -The `plain-user` user is not in the `admin-acl` whitelist, and is therefore -unauthorized to access the resource - -```bash -$ curl -i -X POST --data "foo=bar" \ --H "Authorization: Bearer ${ADMIN_JWT}" $PROXY_IP/post - -HTTP/1.1 200 OK -Content-Type: application/json -Content-Length: 1156 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 06 Apr 2020 07:20:35 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 4 -X-Kong-Proxy-Latency: 4 -Via: kong/2.0.2 - -{ - "args": {}, - "data": "", - "files": {}, - "form": { - "foo": "bar" - }, - "headers": { - "Accept": "*/*", - "Authorization": "Bearer eyJ...", - "Connection": "keep-alive", - "Content-Length": "7", - 
"Content-Type": "application/x-www-form-urlencoded", - "Host": "localhost", - "User-Agent": "curl/7.54.0", - "X-Consumer-Groups": "app-admin", - "X-Consumer-Id": "393611c3-aea9-510d-9be4-ac429ecc53f4", - "X-Consumer-Username": "admin", - "X-Credential-Identifier": "localhost", - "X-Forwarded-Host": "localhost" - }, - "json": null, - "origin": "192.168.0.3", - "url": "http://some.url/post" -} -``` diff --git a/app/kubernetes-ingress-controller/2.0.x/guides/configuring-fallback-service.md b/app/kubernetes-ingress-controller/2.0.x/guides/configuring-fallback-service.md deleted file mode 100644 index 9795dc6be34b..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/guides/configuring-fallback-service.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: Configuring a fallback service ---- - -This guide walks through how to setup a fallback service using Ingress -resource. The fallback service will receive all requests that don't -match against any of the defined Ingress rules. -This can be useful for scenarios where you would like to return a 404 page -to the end user if the user clicks on a dead link or inputs an incorrect URL. - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup a simple HTTP service in the -cluster and proxy it. - -```bash -$ echo ' -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fallback-svc -spec: - replicas: 1 - selector: - matchLabels: - app: fallback-svc - template: - metadata: - labels: - app: fallback-svc - spec: - containers: - - name: fallback-svc - image: hashicorp/http-echo - args: - - "-text" - - "This is not the path you are looking for. 
- Fallback service" - ports: - - containerPort: 5678 ---- -apiVersion: v1 -kind: Service -metadata: - name: fallback-svc - labels: - app: fallback-svc -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: 5678 - protocol: TCP - name: http - selector: - app: fallback-svc -' | kubectl apply -f - -``` - -Result: - -```bash -deployment.apps/fallback-svc created -service/fallback-svc created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /foo - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup a fallback service - -Let's deploy another sample service service: - -```bash -$ kubectl apply -f https://bit.ly/fallback-svc -deployment.extensions/fallback-svc created -service/fallback-svc created -``` - -Next, let's set up an Ingress rule to make it the fallback service -to send all requests to it that don't match any of our Ingress rules: - -```bash -$ echo " -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: fallback - annotations: -spec: - ingressClassName: kong - backend: - service: - name: fallback-svc - port: - number: 80 -" | kubectl apply -f - -``` - -## Test it - -Now send a request with a request property that doesn't match against -any of the defined rules: - -```bash -$ curl $PROXY_IP/random-path -This is not the path you are looking for. - Fallback service -``` - -The above message comes from the fallback service that was deployed in the -last step. - -Create more Ingress rules, some complicated regex based ones and -see how requests that don't match any rules, are forwarded to the -fallback service. - -You can also use Kong's request-termination plugin on the `fallback` -Ingress resource to terminate all requests at Kong, without -forwarding them inside your infrastructure. diff --git a/app/kubernetes-ingress-controller/2.0.x/guides/configuring-health-checks.md b/app/kubernetes-ingress-controller/2.0.x/guides/configuring-health-checks.md deleted file mode 100644 index ef787c001802..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/guides/configuring-health-checks.md +++ /dev/null @@ -1,356 +0,0 @@ ---- -title: Setting up Active and Passive health checks ---- - -In this guide, we will go through steps necessary to setup active and passive -health checking using the {{site.kic_product_name}}. This configuration allows -Kong to automatically short-circuit requests to specific Pods that are -mis-behaving in your Kubernetes Cluster. - -> Please make sure to use {{site.kic_product_name}} >= 0.6 as the previous -versions contain a [bug](https://github.com/hbagdi/go-kong/issues/6). - -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} onto your Kubernetes cluster. 
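If you just want a quick test environment, the DB-less all-in-one manifest used elsewhere in these guides is one option; this sketch assumes a cluster that can provision a `LoadBalancer` Service:

```bash
# Deploy Kong and the ingress controller with the DB-less all-in-one manifest
kubectl create -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{ page.version }}/deploy/single/all-in-one-dbless.yaml
```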
- -## Testing connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected since Kong doesn't know how to proxy any requests yet. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -$ kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /foo - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -Observe the headers and you can see that Kong has proxied the request correctly. - -## Setup passive health checking - -Now, let's setup passive HTTP health-check for our service. -All health-checking is done at Service-level and not Ingress-level. - -Add the following KongIngress resource: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking created -``` - -Here, we are configuring Kong to short-circuit requests to a pod -if a pod throws 3 consecutive errors. - -Next, associate the KongIngress resource with `httpbin` service: - -```bash -$ kubectl patch svc httpbin -p '{"metadata":{"annotations":{"konghq.com/override":"demo-health-checking"}}}' -service/httpbin patched -``` - -Now, let's send some traffic to test if this works: - -Let's send 2 requests that represent a failure from upstream -and then send a request for 200. -Here we are using `/status/500` to simulate a failure from upstream. 
- -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Kong has not short-circuited because there were only two failures. -Let's send 3 requests and open the circuit, and then send a normal request. - -```bash -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -$ curl -i $PROXY_IP/foo/status/500 -HTTP/1.1 500 INTERNAL SERVER ERROR -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:24 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 0 -Via: kong/1.2.1 - -curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 22:41:19 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} - -``` - -As we can see, Kong returns back a 503, representing that the service is -unavailable. Since we have only one pod of httpbin running in our cluster, -and that is throwing errors, Kong will not proxy anymore requests. - -Now we have a few options: - -- Delete the current httpbin pod; Kong will then proxy requests to the new - pod that comes in its place. -- Scale the httpbin deployment; Kong will then proxy requests to the new - pods and leave the short-circuited pod out of the loop. -- Manually change the pod health status in Kong using Kong's Admin API. - -These options highlight the fact that once a circuit is opened because of -errors, there is no way for Kong to close the circuit again. - -This is a feature which some services might need, where once a pod starts -throwing errors, manual intervention is necessary before that pod can -again handle requests. 
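If you do want to intervene manually, Kong's Admin API can mark an upstream target as healthy again. The sketch below assumes a database-backed Kong deployment with the Admin API reachable on `localhost:8001` (for example via `kubectl port-forward`); the upstream and target names are placeholders:

```bash
# Mark a specific upstream target as healthy again through the Admin API
curl -i -X POST http://localhost:8001/upstreams/<UPSTREAM_NAME>/targets/<TARGET_HOST:PORT>/healthy
```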
-To get around this, we can introduce active health-check, where each instance -of Kong actively probes pods to figure out if they are healthy or not. - -## Setup active health checking - -Let's update our KongIngress resource to use active health-checks: - -```bash -$ echo "apiVersion: configuration.konghq.com/v1 -kind: KongIngress -metadata: - name: demo-health-checking -upstream: - healthchecks: - active: - healthy: - interval: 5 - successes: 3 - http_path: /status/200 - type: http - unhealthy: - http_failures: 1 - interval: 5 - passive: - healthy: - successes: 3 - unhealthy: - http_failures: 3" | kubectl apply -f - -kongingress.configuration.konghq.com/demo-health-checking configured -``` - -Here, we are configuring Kong to actively probe `/status/200` every 5 seconds. -If a pod is unhealthy (from Kong's perspective), -3 successful probes will change the status of the pod to healthy and Kong -will again start to forward requests to that pod. - -Now, the requests should flow once again: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 - -``` - -Let's trip the circuit again by sending three requests that will return -500s from httpbin: - -```bash -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -$ curl -i $PROXY_IP/foo/status/500 -``` - -Now, sending the following request will fail for about 15 seconds, -the duration it will take active healthchecks to re-classify -the httpbin pod as healthy again. - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 503 Service Temporarily Unavailable -Date: Mon, 05 Aug 2019 23:17:47 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 58 -Server: kong/1.2.1 - -{"message":"failure to get a peer from the ring-balancer"} -``` - -After 15 seconds, you will see: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Mon, 05 Aug 2019 22:38:26 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -As we can see, active health-checks automatically marked a pod as healthy -when passive health-checks marked it unhealthy. - -## Bonus - -Scale the `httpbin` and `ingress-kong` deployments and observe how -multiple pods change the outcome of the above demo. - -Read more about health-checks and ciruit breaker in Kong's -[documentation](/gateway/latest/reference/health-checks-circuit-breakers). diff --git a/app/kubernetes-ingress-controller/2.0.x/guides/configuring-https-redirect.md b/app/kubernetes-ingress-controller/2.0.x/guides/configuring-https-redirect.md deleted file mode 100644 index 02b4a6269fdb..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/guides/configuring-https-redirect.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: Configuring https redirect ---- - -This guide walks through how to configure the {{site.kic_product_name}} to -redirect HTTP request to HTTPS so that all communication -from the external world to your APIs and microservices is encrypted. 
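At a glance, the redirect behavior comes down to two annotations on the Ingress resource, which the steps below create and patch in full. The command here is only a preview of the end state and assumes the `demo` Ingress already exists:

```bash
# Preview only: restrict the route to HTTPS and return a 308 for plain HTTP
kubectl annotate ingress demo \
  konghq.com/protocols=https \
  konghq.com/https-redirect-status-code=308
```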
- -## Installation - -Please follow the [deployment](/kubernetes-ingress-controller/{{page.release}}/deployment/overview/) documentation to install -the {{site.kic_product_name}} on your Kubernetes cluster. - -## Testing Connectivity to Kong - -This guide assumes that the `PROXY_IP` environment variable is -set to contain the IP address or URL pointing to Kong. -Please follow one of the -[deployment guides](/kubernetes-ingress-controller/{{page.release}}/deployment/overview) to configure this environment variable. - -If everything is setup correctly, making a request to Kong should return -HTTP 404 Not Found. - -```bash -$ curl -i $PROXY_IP -HTTP/1.1 404 Not Found -Date: Fri, 21 Jun 2019 17:01:07 GMT -Content-Type: application/json; charset=utf-8 -Connection: keep-alive -Content-Length: 48 -Server: kong/1.2.1 - -{"message":"no Route matched with those values"} -``` - -This is expected as Kong does not yet know how to proxy the request. - -## Setup a Sample Service - -For the purpose of this guide, we will setup an [httpbin](https://httpbin.org) -service in the cluster and proxy it. - -```bash -kubectl apply -f https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/v{{site.data.kong_latest_KIC.version}}/deploy/manifests/httpbin.yaml -service/httpbin created -deployment.apps/httpbin created -``` - -Create an Ingress rule to proxy the httpbin service we just created: - -```bash -$ echo ' -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: demo - annotations: - konghq.com/strip-path: "true" -spec: - ingressClassName: kong - rules: - - http: - paths: - - path: /foo - pathType: ImplementationSpecific - backend: - service: - name: httpbin - port: - number: 80 -' | kubectl apply -f - -ingress.extensions/demo created -``` - -Test the Ingress rule: - -```bash -$ curl -i $PROXY_IP/foo/status/200 -HTTP/1.1 200 OK -Content-Type: text/html; charset=utf-8 -Content-Length: 0 -Connection: keep-alive -Server: gunicorn/19.9.0 -Date: Wed, 17 Jul 2019 19:25:32 GMT -Access-Control-Allow-Origin: * -Access-Control-Allow-Credentials: true -X-Kong-Upstream-Latency: 2 -X-Kong-Proxy-Latency: 1 -Via: kong/1.2.1 -``` - -## Setup HTTPS redirect - -To instruct Kong to redirect all HTTP requests matching this Ingress rule to -HTTPS, update its annotations to limit its protocols to HTTPS only and -issue a 308 redirect: - -```bash -$ kubectl patch ingress demo -p '{"metadata":{"annotations":{"konghq.com/protocols":"https","konghq.com/https-redirect-status-code":"308"}}}' -ingress.extensions/demo patched -``` - -## Test it - -Now, making a plain-text HTTP request to Kong will result in a redirect -being issued from Kong: - -```bash -$ curl $PROXY_IP/foo/headers -I -HTTP/1.1 308 Permanent Redirect -Date: Tue, 06 Aug 2019 18:04:38 GMT -Content-Type: text/html -Content-Length: 167 -Connection: keep-alive -Location: https://35.197.125.63/foo/headers -Server: kong/1.2.1 -``` - -The `Location` header will contain the URL you need to use for an HTTPS -request. Please note that this URL will be different depending on your -installation method. You can also grab the IP address of the load balancer -fronting Kong and send a HTTPS request to test it. 
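One way to grab that address, assuming the proxy Service is named `kong-proxy` in the `kong` namespace:

```bash
# Fetch the external IP of the load balancer fronting Kong
kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" service -n kong kong-proxy
```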
- -Let's test it: - -```bash -$ curl -k https://35.197.125.63/foo/headers -{ - "headers": { - "Accept": "*/*", - "Connection": "keep-alive", - "Host": "35.197.125.63", - "User-Agent": "curl/7.54.0", - "X-Forwarded-Host": "35.197.125.63" - } -} -``` - -We can see that Kong correctly serves the request only on HTTPS protocol -and redirects the user if plaint-text HTTP protocol is used. -We had to use `-k` flag in cURL to skip certificate validation as the -certificate served by Kong is a self-signed one. -If you are serving this traffic via a domain that you control and have -configured TLS properties for it, then the flag won't -be necessary. - -If you have a domain that you control but don't have TLS/SSL certificates -for it, please check out out -[Using cert-manager with Kong](/kubernetes-ingress-controller/{{page.release}}/guides/cert-manager) guide which can get TLS -certificates setup for you automatically. And it's free, thanks to -Let's Encrypt! diff --git a/app/kubernetes-ingress-controller/2.0.x/guides/getting-started-istio.md b/app/kubernetes-ingress-controller/2.0.x/guides/getting-started-istio.md deleted file mode 100644 index fdafa0de9480..000000000000 --- a/app/kubernetes-ingress-controller/2.0.x/guides/getting-started-istio.md +++ /dev/null @@ -1,521 +0,0 @@ ---- -title: Running the Kong Ingress Controller with Istio ---- - -This guide walks you through deploying {{site.base_gateway}} with {{site.kic_product_name}} -as the gateway for [Istio][istio] as your service mesh solution. - -See the [version compatibility reference][compat] for the -tested compatible versions of {{site.kic_product_name}} and Istio. - -[istio]:https://istio.io -[compat]:/kubernetes-ingress-controller/{{page.release}}/references/version-compatibility/#istio - -## Overview - -[Istio][istio] is a popular [service mesh][mesh] that enables [traffic -management][traffic-management], [security][security], and [observability][obs] -features for [Kubernetes][k8s] clusters. - -With {{site.base_gateway}} and Istio, you can combine the -mesh features of Istio inside your cluster with Kong's rich feature set -for ingress traffic from outside the cluster. - -This guide shows how to: - -* Install Istio and {{site.base_gateway}} with {{site.kic_product_name}} in your cluster. -* Deploy an example Istio-enabled application. -* Deploy an `Ingress` customized with a Kong plugin for the example application. -* Make requests to the sample application via Kong and Istio. -* Explore the observability features of Istio to visualize cluster traffic. - -[istio]:https://istio.io -[mesh]:https://istio.io/latest/docs/concepts/ -[traffic-management]:https://istio.io/latest/docs/concepts/traffic-management/ -[security]:https://istio.io/latest/docs/concepts/security/ -[obs]:https://istio.io/latest/docs/concepts/observability/ -[k8s]:https://kubernetes.io - -### Prerequisites - -* A Kubernetes cluster v1.19 or later -* [kubectl][kubectl] v1.19 or later -* [Helm][helm] v3.5 or later -* [cURL][curl] version 7.x.x - -You can use a managed cluster from a cloud provider, such as [AWS (EKS)][eks], -[Google Cloud (GKE)][gke], or [Azure (AKS)][aks], or you can work locally with -tools such as [Minikube][minikube] or [Microk8s][microk8s]. - -Your Kubernetes cluster must provision - `LoadBalancer` type [Services][svc]. Cloud providers generally - automate `LoadBalancer` type `Service` provisioning with their default - settings, but if you run your cluster elsewhere you might need to check - the relevant documentation for details. 
See also the Kubernetes documentation - for [external load balancers][svc-lb]. - -Some of the `kubectl` calls in this guide assume your test - cluster is the current default cluster context. To check, or for more - information, see the Kubernetes documentation on - [configuring access to multiple clusters][contexts]. - -[kubectl]:https://kubernetes.io/docs/tasks/tools/#kubectl -[helm]:https://helm.sh/ -[curl]:https://github.com/curl/curl -[eks]:https://aws.amazon.com/eks/ -[gke]:https://cloud.google.com/kubernetes-engine/ -[aks]:https://azure.microsoft.com/services/kubernetes-service/ -[minikube]:https://github.com/kubernetes/minikube -[microk8s]:https://microk8s.io/ -[svc]:https://kubernetes.io/docs/concepts/services-networking/service/ -[svc-lb]:https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#external-load-balancer-providers -[contexts]:https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/ - -### Download and verify Istio - -This guide shows how to install with [istioctl][istioctl], -because it's the community recommended method. The [Istio installation guides][istio-install] -explain alternative deployment mechanisms. - -You can also explore the - [Istio FAQ][install-method-faq] for more information about the differences - between methods. However, if you choose another installation method, - you might need to adjust the examples in this guide. - -1. Download the `istioctl` command-line utility for your platform: - - ```console - curl -s -L https://istio.io/downloadIstio | ISTIO_VERSION=1.11.2 sh - - ``` - - The response includes instructions to set up the `istioctl` program locally and perform - pre-check validation of the Istio installation. - -1. Make sure to add `istioctl` to your shell's path: - - ```console - export PATH="$PATH:$PWD/istio-1.11.2/bin" - ``` - -1. Verify that `istioctl` is working, and run checks on your -Kubernetes cluster to ensure Istio will deploy to it properly: - - ```console - istioctl x precheck - ``` - -[istio-install]:https://istio.io/latest/docs/setup/install/ -[istio-quickstart]:https://istio.io/latest/docs/setup/getting-started/ -[istioctl]:https://istio.io/latest/docs/setup/install/istioctl/ -[install-method-faq]:https://istio.io/latest/about/faq/#install-method-selection - -### Deploy Istio - -Istio provides [configuration profiles][istio-profiles] to let you customize your Istio -deployment, and default profiles are included with the installation. This guide works with -the `demo` profile, which is meant for testing and evaluation. - -Deploy Istio with the `demo` profile: - -```console -istioctl install --set profile=demo -y -``` - -[istio-profiles]:https://istio.io/latest/docs/setup/additional-setup/config-profiles/ -[k8s-namespaces]:https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - -### Create an Istio-enabled namespace for {{site.kic_product_name}} - -To integrate Istio's mesh functionality in any given Kubernetes -[Pod][k8s-pods], a [namespace][k8s-namespaces] must be [labeled][k8s-labels] -with the `istio-injection=enabled` label to instruct [IstioD][istiod] -- the main -control program for Istio -- to manage the pods and add them to the mesh network. - -1. Create the Istio-enabled namespace: - - ```console - kubectl create namespace kong-istio - ``` - -1. 
Enable the namespace for the Istio mesh: - - ```console - kubectl label namespace kong-istio istio-injection=enabled - ``` - -[k8s-pods]:https://kubernetes.io/docs/concepts/workloads/pods/ -[k8s-namespaces]:https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ -[k8s-labels]:https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -[istiod]:https://istio.io/latest/docs/ops/deployment/architecture/#istiod - -### Deploy {{site.base_gateway}} and {{site.kic_product_name}} - -The [Kong Helm Chart][chart] deploys a Pod that includes containers for -{{site.base_gateway}} and {{site.kic_product_name}}. Here's how to deploy it -to the Istio-enabled `kong-istio` namespace. - -1. Make sure you have the Kong Helm repository configured locally: - - ```console - helm repo add kong https://charts.konghq.com && helm repo update - ``` - -1. Deploy the chart: - - ```console - helm install -n kong-istio kong-istio kong/kong - ``` - -1. Verify that Kong containers are deployed and the Istio sidecar container is injected -properly: - - ```console - kubectl describe pod -n kong-istio -l app.kubernetes.io/instance=kong-istio - ``` - The output should look like: - - ```console - Events: - Type Reason From Message - ---- ------ ---- ------- - Normal Scheduled default-scheduler Successfully assigned kong-istio/kong-istio-kong-8f875f9fd-qsv4p to gke-istio-testing-default-pool-403b2219-l5ns - Normal Pulled kubelet Container image "docker.io/istio/proxyv2:1.11.2" already present on machine - Normal Created kubelet Created container istio-init - Normal Started kubelet Started container istio-init - Normal Pulling kubelet Pulling image "kong/kubernetes-ingress-controller:1.3" - Normal Pulled kubelet Successfully pulled image "kong/kubernetes-ingress-controller:1.3" in 2.645390171s - Normal Created kubelet Created container ingress-controller - Normal Started kubelet Started container ingress-controller - Normal Pulling kubelet Pulling image "kong:2.5" - Normal Pulled kubelet Successfully pulled image "kong:2.5" in 3.982679281s - Normal Created kubelet Created container proxy - Normal Started kubelet Started container proxy - Normal Pulled kubelet Container image "docker.io/istio/proxyv2:1.11.2" already present on machine - Normal Created kubelet Created container istio-proxy - Normal Started kubelet Started container istio-proxy - ``` - - See also the Kubernetes documentation on [using kubectl to fetch pod details][k8s-describe-pod]. - -[chart]:https://github.com/Kong/charts -[k8s-describe-pod]:https://kubernetes.io/docs/tasks/debug-application-cluster/debug-application-introspection/#using-kubectl-describe-pod-to-fetch-details-about-pods - -### Deploy BookInfo example application - -The Istio [BookInfo][bookinfo] application provides a basic example that lets -you explore and evaluate Istio's mesh features. - -As in previous steps, you create a namespace, add the appropriate label, and -then deploy. - -1. Create the namespace: - - ```console - kubectl create namespace bookinfo - ``` - -1. Label the namespace for Istio injection: - - ```console - kubectl label namespace bookinfo istio-injection=enabled - ``` - - -1. 
Deploy the `BookInfo` app from the Istio bundle:
-
-    ```console
-    kubectl -n bookinfo apply -f istio-1.11.2/samples/bookinfo/platform/kube/bookinfo.yaml
-    ```
-
-    The response should look like:
-
-    ```console
-    service/details created
-    serviceaccount/bookinfo-details created
-    deployment.apps/details-v1 created
-    service/ratings created
-    serviceaccount/bookinfo-ratings created
-    deployment.apps/ratings-v1 created
-    service/reviews created
-    serviceaccount/bookinfo-reviews created
-    deployment.apps/reviews-v1 created
-    deployment.apps/reviews-v2 created
-    deployment.apps/reviews-v3 created
-    service/productpage created
-    serviceaccount/bookinfo-productpage created
-    deployment.apps/productpage-v1 created
-    ```
-
-1. Wait until the application is up:
-
-    ```console
-    kubectl -n bookinfo wait --timeout 120s --for=condition=Available deployment productpage-v1
-    ```
-
-[bookinfo]:https://istio.io/latest/docs/examples/bookinfo/
-
-### Access BookInfo externally through {{site.base_gateway}}
-
-At this point, the BookInfo application is available only inside the cluster. Here's how
-to expose it externally with an [Ingress][ingress].
-
-1. Save the following as `bookinfo-ingress.yaml`:
-
-    ```yaml
-    apiVersion: networking.k8s.io/v1
-    kind: Ingress
-    metadata:
-      name: productpage
-      namespace: bookinfo
-    spec:
-      ingressClassName: kong
-      rules:
-      - http:
-          paths:
-          - path: /
-            pathType: ImplementationSpecific
-            backend:
-              service:
-                name: productpage
-                port:
-                  number: 9080
-    ```
-
-1. Apply the manifest:
-
-    ```console
-    kubectl apply -f bookinfo-ingress.yaml
-    ```
-
-1. To make HTTP requests using {{site.base_gateway}} as ingress, you need the IP address of the
-load balancer. Get the `LoadBalancer` address and store it in a local `PROXY_IP`
-environment variable:
-
-    ```console
-    export PROXY_IP=$(kubectl -n kong-istio get svc kong-istio-kong-proxy -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
-    ```
-
-    If you're running your cluster on AWS, specify `.hostname` instead of `.ip`, because the AWS
-    load balancer provides only a DNS name. Other cluster providers can behave the same way.
-    A combined lookup that handles both cases is sketched at the end of this section.
-
-    Make sure `$PROXY_IP` contains the external address of the proxy Service.
-    You can confirm it with `kubectl -n kong-istio get svc kong-istio-kong-proxy`.
-
-1. Make an external connection request:
-
-    ```console
-    curl -s -v http://$PROXY_IP | head -4
-    ```
-
-    The response should look like:
-
-    ```console
-    curl -s -v http://$PROXY_IP | head -4
-    *   Trying 127.0.0.1:80...
-    * Connected to 127.0.0.1 (127.0.0.1) port 80 (#0)
-    > GET / HTTP/1.1
-    > Host: 127.0.0.1
-    > User-Agent: curl/7.76.1
-    > Accept: */*
-    >
-    * Mark bundle as not supporting multiuse
-    < HTTP/1.1 200 OK
-    < content-type: text/html; charset=utf-8
-    < content-length: 1683
-    < server: istio-envoy
-    < x-envoy-upstream-service-time: 6
-    < x-kong-upstream-latency: 4
-    < x-kong-proxy-latency: 1
-    < via: kong/2.5.0
-    < x-envoy-decorator-operation: kong-istio-kong-proxy.kong-istio.svc.cluster.local:80/*
-    <
-    { [1079 bytes data]
-    * Connection #0 to host 127.0.0.1 left intact
-    <!DOCTYPE html>
-    <html>
-      <head>
-        <title>Simple Bookstore App</title>
-    ```
-
-Note the following in the response:
-
-- `Simple Bookstore App` - connected to the `BookInfo` app as expected.
-- `server: istio-envoy` - the Istio mesh network is in use for the `BookInfo` product page.
-- `via: kong/2.5.0` - {{site.base_gateway}} provides the connection to the backend `BookInfo` service.
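As noted above, some providers publish a DNS name instead of an IP address for the
load balancer. A small helper like the following, based on the same `kubectl` lookup
used earlier, sets `PROXY_IP` to whichever field is populated. It assumes the
`kong-istio` namespace and release name used in this guide:

```bash
# Prefer the IP; fall back to the hostname that some providers
# (for example, AWS ELBs) publish instead.
PROXY_IP=$(kubectl -n kong-istio get svc kong-istio-kong-proxy \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
if [ -z "$PROXY_IP" ]; then
  PROXY_IP=$(kubectl -n kong-istio get svc kong-istio-kong-proxy \
    -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
fi
export PROXY_IP
echo "Proxy address: $PROXY_IP"
```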
-
-[ingress]:https://kubernetes.io/docs/concepts/services-networking/ingress/
-
-### Configure rate limiting with a Kong plugin
-
-To demonstrate Kong features for Istio-enabled services, you can create a [KongPlugin][kongplugin]
-to enforce rate limiting on ingress requests to the `BookInfo`
-service. The plugin limits outside access to the `BookInfo` application to 30 requests per minute.
-
-1. Save the following as `bookinfo-ratelimiter.yaml`:
-
-    ```yaml
-    apiVersion: configuration.konghq.com/v1
-    kind: KongPlugin
-    metadata:
-      name: rate-limit
-      namespace: bookinfo
-    plugin: rate-limiting
-    config:
-      minute: 30
-      policy: local
-    ```
-
-1. Apply the manifest:
-
-    ```console
-    kubectl apply -f bookinfo-ratelimiter.yaml
-    ```
-
-1. Add an annotation to the `Ingress` resource to attach the rate limiting plugin:
-
-    ```console
-    kubectl -n bookinfo patch ingress productpage -p '{"metadata":{"annotations":{"konghq.com/plugins":"rate-limit"}}}'
-    ```
-
-1. Inspect the headers in the response from the BookInfo product page:
-
-    ```console
-    curl -s -v http://$PROXY_IP 2>&1 | grep ratelimit
-    ```
-
-    The response should look like:
-
-    ```console
-    < x-ratelimit-remaining-minute: 26
-    < x-ratelimit-limit-minute: 30
-    < ratelimit-remaining: 26
-    < ratelimit-limit: 30
-    < ratelimit-reset: 2
-    ```
-
-For more examples of Kong features to add to your environment, see the
-[available guides][kic-guides].
-
-[kongplugin]:/kubernetes-ingress-controller/{{page.release}}/guides/using-kongplugin-resource/
-[anns]:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-[kic-guides]:/kubernetes-ingress-controller/{{page.release}}/guides/overview/
-
-### Mesh network observability with Kiali
-
-For observability, Istio includes a web console called [Kiali][kiali] that
-provides [topology][kiali-topology], [health][kiali-health], and
-[other features][kiali-features] to give you insight
-into your application traffic.
-
-You also need a Prometheus metrics server and Grafana for visualization dashboards.
-Istio includes these as addons. Here's what to do:
-
-1. Install Prometheus:
-
-    ```console
-    kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.11/samples/addons/prometheus.yaml
-    ```
-    The response should look like:
-
-    ```console
-    serviceaccount/prometheus created
-    configmap/prometheus created
-    clusterrole.rbac.authorization.k8s.io/prometheus created
-    clusterrolebinding.rbac.authorization.k8s.io/prometheus created
-    service/prometheus created
-    deployment.apps/prometheus created
-    ```
-
-1. Install [Grafana][graphana]:
-
-    ```console
-    kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.11/samples/addons/grafana.yaml
-    ```
-
-    The response should look like:
-
-    ```console
-    serviceaccount/grafana created
-    configmap/grafana created
-    service/grafana created
-    deployment.apps/grafana created
-    configmap/istio-grafana-dashboards created
-    configmap/istio-services-grafana-dashboards created
-    ```
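-
    Optionally, wait for these addons to finish rolling out before you continue. This
    is a minimal sketch, assuming the addons were installed into the default
    `istio-system` namespace used by the Istio samples:

    ```bash
    # Wait for the addon Deployments created above to become available.
    kubectl -n istio-system rollout status deployment/prometheus --timeout=120s
    kubectl -n istio-system rollout status deployment/grafana --timeout=120s
    ```
-
-1. 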
Install Kiali: - - ```console - kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.11/samples/addons/kiali.yaml - ``` - - The response should look like: - - ```console - serviceaccount/kiali created - configmap/kiali created - clusterrole.rbac.authorization.k8s.io/kiali-viewer created - clusterrole.rbac.authorization.k8s.io/kiali created - clusterrolebinding.rbac.authorization.k8s.io/kiali created - role.rbac.authorization.k8s.io/kiali-controlplane created - rolebinding.rbac.authorization.k8s.io/kiali-controlplane created - service/kiali created - deployment.apps/kiali created - ``` - -1. Generate traffic for the BookInfo application, to create traffic metrics to view in Kiali: - - ```console - COUNT=25 ; until [ $COUNT -le 0 ]; do curl -s -o /dev/null http://$PROXY_IP ; ((COUNT--)); done - ``` - -1. In a production environment, you'd access the Kiali dashboard through the Kong -ingress. But this sample version of Kiali is meant for exploring only internal traffic on the -cluster. You can instead use the [port-forwarding][k8s-port-forwarding] functionality that `istioctl` -provides. - - In a new terminal, run: - - ```console - istioctl dashboard kiali - ``` - - This runs a `port-forward` to Kiali in the background and opens it in your web browser. - The response should look like: - - ```console - http://localhost:20001/kiali - ``` - - If http://localhost:20001/kiali doesn't open automatically -in your browser, navigate manually to the address. - -You're now connected to Kiali and have a window into the traffic moving across -your mesh network. Familiarize yourself with Kiali and graphically view the -topology for your `BookInfo` application's web requests: - -- Choose _Workloads_ from the menu on the left. -- Select `bookinfo` in the _Namespace_ drop-down menu. -- Select the _productpage-v1_ service name. -- In the top-right corner change the `Last