diff --git a/.gitignore b/.gitignore
index cb908f1..807a10c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,3 @@
-_dev-env
 irsa-operator
 
 # Binaries for programs and plugins
diff --git a/_dev-env/.gitignore b/_dev-env/.gitignore
new file mode 100644
index 0000000..1cf2c41
--- /dev/null
+++ b/_dev-env/.gitignore
@@ -0,0 +1,2 @@
+k8s-pki
+webhook
diff --git a/_dev-env/Makefile b/_dev-env/Makefile
new file mode 100644
index 0000000..f25224d
--- /dev/null
+++ b/_dev-env/Makefile
@@ -0,0 +1,25 @@
+start_kind:
+	sudo rm -rf ./k8s-pki
+	mkdir ./k8s-pki
+	kind create cluster --config ./kind-config.yml
+	sudo chmod 644 ./k8s-pki/sa.*
+
+start_docker_compose:
+	USER_ID=$(shell id -u) GROUP_ID=$(shell id -g) docker-compose up -d
+
+register_oidc:
+	AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test AWS_REGION=us-east-1 aws --endpoint-url=http://localhost:4566 iam create-open-id-connect-provider --url https://hydra.local:4444 --client-id-list sts.amazonaws.com --thumbprint-list $(shell ./get-hydra-thumbprint.sh)
+
+check:
+	AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test AWS_REGION=us-east-1 PAGER= aws --no-cli-pager --endpoint-url=http://localhost:4566 iam list-open-id-connect-providers
+
+wait_for_localstack:
+	./wait-for-localstack.sh
+	echo "localstack ready"
+
+start: start_kind start_docker_compose wait_for_localstack register_oidc
+
+tear_down:
+	kind delete clusters irsa-operator
+	USER_ID=$(shell id -u) GROUP_ID=$(shell id -g) docker-compose down
+	sudo rm -rf ./k8s-pki
diff --git a/_dev-env/README.md b/_dev-env/README.md
new file mode 100644
index 0000000..90db340
--- /dev/null
+++ b/_dev-env/README.md
@@ -0,0 +1,138 @@
+# dev env
+
+## caveats
+- localstack (community edition) doesn't enforce IAM
+- k8s version compatibility issue with
+
+## clean up
+```
+sudo rm -rf ./k8s-pki
+mkdir ./k8s-pki
+```
+
+## start the k8s cluster
+
+```
+kind create cluster --config ./kind-config.yml
+sudo chmod 644 ./k8s-pki/sa.*
+```
+
+- it will create the kubernetes cluster and the `kind` docker network (which we'll join later), and populate the `./k8s-pki/` folder with all the kubernetes pki keys.
+- `kubectl get nodes` should return a `Ready` node.
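+
+to double-check that the api-server picked up the service-account flags, you can grep its static pod manifest (a quick sanity check ; `irsa-operator-control-plane` is the container name kind derives from the cluster name) :
+```
+docker exec irsa-operator-control-plane grep service-account /etc/kubernetes/manifests/kube-apiserver.yaml
+```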
+
+## start the other services
+
+we'll start 3 other services :
+- aws localstack to fake aws
+- hydra to have an oidc provider
+- a local container registry (accessible from the outside at `localhost:5000`, from inside the `kind` network at `local-registry:5000`)
+
+```
+docker-compose up -d
+```
+
+2 short-lived containers will :
+- set up hydra's sqlite
+- load the serviceaccount `sa` keys in hydra
+
+### check
+
+a `docker ps` should return only 4 long-running containers : `hydra.local`, `aws-local`, `local-registry` & the `kind` node
+
+if you see one of the 2 short-lived ones restarting, it has a problem, check its logs :
+- `hydra-migrate-db` logs should print `Successfully applied migrations!`
+- `hydra-add-keys` logs should print `JSON Web Key Set successfully imported!`
+
+```
+curl https://localhost:4444/.well-known/openid-configuration -k
+curl https://localhost:4444/.well-known/jwks.json -k
+```
+
+should return no error
+
+## register the oidc provider on aws
+
+register hydra as an oidc provider
+
+```
+export AWS_ACCESS_KEY_ID=test
+export AWS_SECRET_ACCESS_KEY=test
+export AWS_REGION=us-east-1
+aws --endpoint-url=http://localhost:4566 iam create-open-id-connect-provider --url https://hydra.local:4444 --client-id-list sts.amazonaws.com --thumbprint-list $(./get-hydra-thumbprint.sh)
+```
+
+NB : we set the client-id to the same value as the `api-audiences` flag passed to the api-server (see ./kind-config.yml)
+
+### check
+```
+aws --endpoint-url=http://localhost:4566 iam list-open-id-connect-providers
+```
+should return
+
+```
+{
+    "OpenIDConnectProviderList": [
+        {
+            "Arn": "arn:aws:iam::000000000000:oidc-provider/hydra.local:4444"
+        }
+    ]
+}
+```
+
+you can also get details using
+```
+aws --endpoint-url=http://localhost:4566 iam get-open-id-connect-provider --open-id-connect-provider-arn arn:aws:iam::000000000000:oidc-provider/hydra.local:4444
+```
+
+## aws setup
+create : an s3 bucket, upload this README to it, a policy granting full access to the bucket, a role tied to the oidc provider, then attach the policy to the role
+
+```
+aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket irsa-test
+aws --endpoint-url=http://localhost:4566 s3 cp ./README.md s3://irsa-test
+aws --endpoint-url=http://localhost:4566 iam create-policy --policy-name my-test-policy --policy-document file://./test/policy.json
+aws --endpoint-url=http://localhost:4566 iam create-role --role-name my-app-role --assume-role-policy-document file://./test/trust-role.json
+aws --endpoint-url=http://localhost:4566 iam attach-role-policy --role-name my-app-role --policy-arn arn:aws:iam::000000000000:policy/my-test-policy
+```
+
+## setup the webhook
+
+```
+cd ./webhook
+./deploy.sh
+cd ..
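+# optional sanity check ; the name below is an assumption : the ./webhook folder
+# is gitignored here, upstream the eks pod-identity-webhook deploys under this name
+kubectl get pods | grep pod-identity-webhook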
+``` + +## deploy irsa-tester +``` +kubectl create -f ./test/irsa-tester.yml +``` + +### check +``` +k exec irsa-tester -- env | grep AWS +``` + +should return +``` +AWS_ROLE_ARN=arn:aws:iam::000000000000:role/my-app-role +AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token +``` + + +## resources + +https://blog.mikesir87.io/2020/09/eks-pod-identity-webhook-deep-dive/ + +https://www.eksworkshop.com/beginner/110_irsa/ + +https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html diff --git a/_dev-env/docker-compose.yml b/_dev-env/docker-compose.yml new file mode 100644 index 0000000..4cf1f9f --- /dev/null +++ b/_dev-env/docker-compose.yml @@ -0,0 +1,88 @@ +version: '3.7' + +services: + local-registry: + image: registry:2 + ports: + - "5000:5000" + restart: unless-stopped + + # AWS + aws-local: + image: localstack/localstack:0.12.12 + ports: + - "4566:4566" + environment: + - SERVICES=iam,s3,sts + - DEBUG=1 + + # OIDC + hydra.local: + image: oryd/hydra:v1.9.0-alpha.3-sqlite + ports: + - "4444:4444" # Public port + - "4445:4445" # Admin port + - "5555:5555" # Port for hydra token user + environment: + - DSN=sqlite:///var/lib/sqlite/db.sqlite?_fk=true + - SERVE_TLS_KEY_PATH=/etc/config/certs/hydra.local.key + - SERVE_TLS_CERT_PATH=/etc/config/certs/hydra.local.crt + user: "${USER_ID}:${GROUP_ID}" + command: + serve -c /etc/config/hydra.yml all + volumes: + - type: volume + source: hydra-sqlite + target: /var/lib/sqlite + read_only: false + - type: bind + source: ./oidc-provider/hydra.yml + target: /etc/config/hydra.yml + - type: bind + source: ./oidc-provider/tls + target: /etc/config/certs + restart: unless-stopped + depends_on: + - hydra-migrate-db + + hydra-migrate-db: + image: oryd/hydra:v1.9.0-alpha.3-sqlite + environment: + - DSN=sqlite:///var/lib/sqlite/db.sqlite?_fk=true + user: "${USER_ID}:${GROUP_ID}" + command: + migrate -c /etc/config/hydra.yml sql -e --yes + volumes: + - type: volume + source: hydra-sqlite + target: /var/lib/sqlite + read_only: false + - type: bind + source: ./oidc-provider/hydra.yml + target: /etc/config/hydra.yml + restart: on-failure + + hydra-add-keys: + image: oryd/hydra:v1.9.0-alpha.3-sqlite + environment: + - DSN=sqlite:///var/lib/sqlite/db.sqlite?_fk=true + - HYDRA_ADMIN_URL=https://hydra.local:4445 + user: "${USER_ID}:${GROUP_ID}" + command: + keys import my-set /etc/pki/sa.key /etc/pki/sa.pub --skip-tls-verify + volumes: + - type: bind + source: ./k8s-pki + target: /etc/pki + restart: on-failure + depends_on: + - hydra.local + + +volumes: + hydra-sqlite: + +networks: + default: + external: + name: kind diff --git a/_dev-env/get-hydra-thumbprint.sh b/_dev-env/get-hydra-thumbprint.sh new file mode 100755 index 0000000..8f6e171 --- /dev/null +++ b/_dev-env/get-hydra-thumbprint.sh @@ -0,0 +1 @@ +openssl s_client -connect localhost:4444 < /dev/null 2>/dev/null | openssl x509 -fingerprint -noout -in /dev/stdin | sed 's/.*=\|://g' diff --git a/_dev-env/kind-config.yml b/_dev-env/kind-config.yml new file mode 100644 index 0000000..20a63b6 --- /dev/null +++ b/_dev-env/kind-config.yml @@ -0,0 +1,24 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +name: irsa-operator +kubeadmConfigPatches: + - | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-issuer: "https://hydra.local:4444" + service-account-key-file: "/etc/kubernetes/pki/sa.pub" + service-account-signing-key-file: "/etc/kubernetes/pki/sa.key" + api-audiences: "sts.amazonaws.com" + +containerdConfigPatches: 
+- |-
+  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
+    endpoint = ["http://local-registry:5000"]
+
+nodes:
+- role: control-plane
+  image: kindest/node:v1.20.7
+  extraMounts:
+  - hostPath: ./k8s-pki/
+    containerPath: /etc/kubernetes/pki
diff --git a/_dev-env/oidc-provider/hydra.yml b/_dev-env/oidc-provider/hydra.yml
new file mode 100644
index 0000000..06d1ccf
--- /dev/null
+++ b/_dev-env/oidc-provider/hydra.yml
@@ -0,0 +1,25 @@
+serve:
+  cookies:
+    same_site_mode: Lax
+
+urls:
+  self:
+    issuer: https://hydra.local:4444
+
+secrets:
+  system:
+    - youReallyNeedToChangeThis
+
+oidc:
+  subject_identifiers:
+    supported_types:
+      - pairwise
+      - public
+    pairwise:
+      salt: youReallyNeedToChangeThis
+
+webfinger:
+  oidc_discovery:
+    supported_claims:
+      - sub
+      - iss
diff --git a/_dev-env/oidc-provider/tls/README.md b/_dev-env/oidc-provider/tls/README.md
new file mode 100644
index 0000000..181f2a2
--- /dev/null
+++ b/_dev-env/oidc-provider/tls/README.md
@@ -0,0 +1,14 @@
+key & cert used for ory hydra's tls (tls is mandatory for it to be added as an oidc provider on aws)
+
+
+```
+openssl genrsa -out hydra.local.key 4096
+openssl req -new -key hydra.local.key -out hydra.local.csr
+
+openssl req -new -x509 -sha256 -key hydra.local.key -out hydra.local.crt -days 365 -subj "/CN=hydra.local"
+```
+
+(old school, should use a SAN instead [https://geekflare.com/san-ssl-certificate/](https://geekflare.com/san-ssl-certificate/) )
+
+## todo
+CN should include port ? (`4444`)
diff --git a/_dev-env/oidc-provider/tls/hydra.local.crt b/_dev-env/oidc-provider/tls/hydra.local.crt
new file mode 100644
index 0000000..e8f09a6
--- /dev/null
+++ b/_dev-env/oidc-provider/tls/hydra.local.crt
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFPTCCAyUCFGapjFo1S6WfW2F+Ldv8EZlXj64SMA0GCSqGSIb3DQEBCwUAMFsx
+CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
+cm5ldCBXaWRnaXRzIFB0eSBMdGQxFDASBgNVBAMMC2h5ZHJhLmxvY2FsMB4XDTIw
+MTIyMzIwMjEzMFoXDTIxMTIyMzIwMjEzMFowWzELMAkGA1UEBhMCQVUxEzARBgNV
+BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
+ZDEUMBIGA1UEAwwLaHlkcmEubG9jYWwwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDD1xNXxGDUhO03+hHm3XKcu5WldW5LWUw75z/0qzXrjPUfDSyieCi6
+9YcU5jstWU9zApvIU0pw75MZgdoh+KRELfrprnAJIkNtnAN2AHUVqplTE9uyvkGc
+trMVHzZe6GZJdosSKFZvwzEhbBek1JLHFB+1FCydVhxzagK3SK1YzW9ZoMP3e58f
+Bbg6UvKFgJe+h17jbarbLCvxj5+HdPV6QI4+pJSZYU3jPlxhTGG4c9p39BMv24SD
+9Kkx0GM/4gW+pM+GZgYpDyr3nJi8wV8/Cv3kBy8hRwpKy0vqoa2kZ669PoUKO0Mc
+mLFMCvvMPkuL3/40/Qo3BUmRx0exqC/C/bKg3uhZ8Zm5q2gO5SAjeWrO5xJfdd6w
+E/pAjdTQ8Syqp3DKbY7Roz9VQOKtoLJdcVaozUOQ5ET0ESOZVVgSeP6MzCb52RQk
+h/JAtSFnd1xopjquVGJUm9K9FGiyufI2Uv7e18Yeq74Yh/HEI1pWVmtO/niEmwui
+HODElQV5aRCX0BRLcYegFlFlnFp5ti+wxH7KgGalVvykc68fXO1NKf2qWw5G/mJ2
+mKWm5pOpPLLYjhowHc9nDOQaehhNVlA3ZRaYRJwmPPpAVkjwr8pCnkvI28Im5ZQX
+wGBAB4sbelIUyDn5/Jd72ZsGT5QGuaTLT07pBtVzmCSKkk3rqOla0wIDAQABMA0G
+CSqGSIb3DQEBCwUAA4ICAQC2v9hbOvrU4yj5lXrpcZIyWDHOg1jjMuolVIWLnWkp
+io2FwuAAAzu87WDLaS4xHHveWFI5KgAK3MPJvewPZqhxOdp8MlcGKQTpc2OlXbcQ
+dMUHw1rqJaip4nr6uBy3qp1rJz+luPCqAcC50AUb3F7EyIbIFD/OuR36ZkdVN2+R
+CxBnstQyRLigvq3juAE5wDw6io1062Y4/3lEqIBLybKZft/WR4BnCcamCY0Wo/w6
+7y05JQ3knkCos8SZ+OLW4tK8jlALiB51fKtZdkPpK4wA5KgcuJ2aYIW7iCwK31sU
+DnwYyHrBUWS91d15MnmgYtpiKlHDrWaUqO+2FmbtN12nyc2fFFlESwGQSInZuzZ/
+Z9eTYeq9cSIa1vOlmGDcunHOvDnRqYbNTHlGXdQ13B5RjtQQTliIQ1DZHuyrpJIi
+Yb/QZRvm0C6+ZI7N1I9sxwL6mZoTBEggU621XYfC7J4mjWGEsg2/WYe69pWMaOmV
+v0XUS0SnnmsJtllvLY3mbgNWz7kWW+JQeHi3x7HDSNhj9ZE3VuY9mjZAsa7kRrkW
+OoWT1TH9tNWkqjTQU2fto3rQFl/DbaEvRnXNhx5jngm7I5i0MP1dM2XCEBs3vMkm
+zdTtmADjuMmk6fgBz0C5dPklVzOTkhvanMzLY0vaa8jBfih3AUcxILl+V3XbsQxt
+2Q==
+-----END CERTIFICATE-----
diff --git a/_dev-env/oidc-provider/tls/hydra.local.csr b/_dev-env/oidc-provider/tls/hydra.local.csr
new file mode 100644
index 0000000..8e9da44
--- /dev/null
+++ b/_dev-env/oidc-provider/tls/hydra.local.csr
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIEoDCCAogCAQAwWzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
+ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEUMBIGA1UEAwwLaHlk
+cmEubG9jYWwwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDD1xNXxGDU
+hO03+hHm3XKcu5WldW5LWUw75z/0qzXrjPUfDSyieCi69YcU5jstWU9zApvIU0pw
+75MZgdoh+KRELfrprnAJIkNtnAN2AHUVqplTE9uyvkGctrMVHzZe6GZJdosSKFZv
+wzEhbBek1JLHFB+1FCydVhxzagK3SK1YzW9ZoMP3e58fBbg6UvKFgJe+h17jbarb
+LCvxj5+HdPV6QI4+pJSZYU3jPlxhTGG4c9p39BMv24SD9Kkx0GM/4gW+pM+GZgYp
+Dyr3nJi8wV8/Cv3kBy8hRwpKy0vqoa2kZ669PoUKO0McmLFMCvvMPkuL3/40/Qo3
+BUmRx0exqC/C/bKg3uhZ8Zm5q2gO5SAjeWrO5xJfdd6wE/pAjdTQ8Syqp3DKbY7R
+oz9VQOKtoLJdcVaozUOQ5ET0ESOZVVgSeP6MzCb52RQkh/JAtSFnd1xopjquVGJU
+m9K9FGiyufI2Uv7e18Yeq74Yh/HEI1pWVmtO/niEmwuiHODElQV5aRCX0BRLcYeg
+FlFlnFp5ti+wxH7KgGalVvykc68fXO1NKf2qWw5G/mJ2mKWm5pOpPLLYjhowHc9n
+DOQaehhNVlA3ZRaYRJwmPPpAVkjwr8pCnkvI28Im5ZQXwGBAB4sbelIUyDn5/Jd7
+2ZsGT5QGuaTLT07pBtVzmCSKkk3rqOla0wIDAQABoAAwDQYJKoZIhvcNAQELBQAD
+ggIBAI6wLFUBfAqIkGrvFIWhy7PeoDKSK4wSrBgAxa8rvnLdRluiYNKITW56ay0h
+WRyntGjmR/4JJ9PZXQSDpZAvajtoO8UOkTjxZgc1IvS3GTbM0BIrl2sADWba9kSm
+HjNd9qemzkJ4JwWBq8k0GpwK5uWckEXKtPaDpiNnerqsge9p5e7hLCjL41n+aGVQ
+0LjzwUm/nvzxMEx6elHrxREhVZPxnqUzU7LQO4DrizbCJZ5p2WlbX8P7Xbm4mUtz
+s0NPW/TYmJH8NIVIzd6+6A75KRQrMtNSuIWIgfokFy7/fEJc9L+COFMAQGTKGiGO
+BHcXhvcNVRm+h10q7WwR0KdeAC60/QtgAl763G2zS1/QkN3Oe2eCSfEW1L3Bi3cA
+czL1E4iXH2G2YiAEfRe2UbSMcq1ydppMipUs9aXg4XQ88pgSOwqw7Pphz8zKZGjl
++fVcgdMPQRYUs+xpmHZ2BMP/hesUzdp43+EY3kFf5sez6r/uw7DvGL/ojk6A7tBT
+uhF4Ok0ocR5PmXMijaSQvi9k/wnSJbMaJRXOavicCShw7gDqrBTyDoSUIX39IqXl
+BigpRXuxCEFNqgiKbR8R1647tCLMoqRtiuDKfQXnyBb/3ik9n93Tv+lZtQLbY1oC
+B32HQAvftNpAS0DZij1FyBl2Mj9raaW8mI9RR3GIUOCzx5D+
+-----END CERTIFICATE REQUEST-----
diff --git a/_dev-env/oidc-provider/tls/hydra.local.key b/_dev-env/oidc-provider/tls/hydra.local.key
new file mode 100644
index 0000000..b44f2c5
--- /dev/null
+++ b/_dev-env/oidc-provider/tls/hydra.local.key
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAw9cTV8Rg1ITtN/oR5t1ynLuVpXVuS1lMO+c/9Ks164z1Hw0s
+ongouvWHFOY7LVlPcwKbyFNKcO+TGYHaIfikRC366a5wCSJDbZwDdgB1FaqZUxPb
+sr5BnLazFR82XuhmSXaLEihWb8MxIWwXpNSSxxQftRQsnVYcc2oCt0itWM1vWaDD
+93ufHwW4OlLyhYCXvode422q2ywr8Y+fh3T1ekCOPqSUmWFN4z5cYUxhuHPad/QT
+L9uEg/SpMdBjP+IFvqTPhmYGKQ8q95yYvMFfPwr95AcvIUcKSstL6qGtpGeuvT6F
+CjtDHJixTAr7zD5Li9/+NP0KNwVJkcdHsagvwv2yoN7oWfGZuatoDuUgI3lqzucS
+X3XesBP6QI3U0PEsqqdwym2O0aM/VUDiraCyXXFWqM1DkORE9BEjmVVYEnj+jMwm
++dkUJIfyQLUhZ3dcaKY6rlRiVJvSvRRosrnyNlL+3tfGHqu+GIfxxCNaVlZrTv54
+hJsLohzgxJUFeWkQl9AUS3GHoBZRZZxaebYvsMR+yoBmpVb8pHOvH1ztTSn9qlsO
+Rv5idpilpuaTqTyy2I4aMB3PZwzkGnoYTVZQN2UWmEScJjz6QFZI8K/KQp5LyNvC
+JuWUF8BgQAeLG3pSFMg5+fyXe9mbBk+UBrmky09O6QbVc5gkipJN66jpWtMCAwEA
+AQKCAgBX5iXJY9vuJwdt83CrhUNW8570F6P8b8UWsfFzuJDH+4ldeYVKGtJ1L5SE
+nd17ujU9srMHE9M6o10LusuUKcxYyaZv0hp5DwbioH6xLP1gTyToM+aM8HX0h9AT
+L+IwcPDQEkPTT4WUPjDEBocKNVr3vaGJndd0etzkNHWpnBYp4HMU8GJkpvjBbMjG
+bMprf08cdhfxzhw8qTPEBSmeG7G+QFFX7ni/6vuRfkDqbuUBbAZnhoZCGa39qAd0
+00potuLLMZ/ve8VSEnu/jWrEdapxWlagTVUrA36sGVkmo5CTVPIKAJCoCfdSU3po
+XhmsD13LFys6z0QRc+8UXjRgmdNtUOU7YmSUtCpYUTkfOOY7MbhL7Mm00uiq8jwR
+t+mcWq3WYaGRpp5yupEqEgw+S6+k0OBErK2/8J61x1O3LNTT6fNMv2lQYOANfir9
+MQZfWXpOVSuv32fNvYAyN90DAJTgXXuhsP3lCdjH/c0bSVDrhiTj4+vcU0W4q8WM
+haqPl/qYn/WO+XUYaNLygksK/yAHEzIS7DbjL+BADHEO1eoViu+ETo41SEg24Qa1
+ZbxhGMv3D6ykN72LvhLWgjEYBe/lmGyScO1Mv2yjKrliVWRsoRolwpsDbix+UlBY
+nAUE63arbMnhk3WgVNeiGACQaWlWiKsjz9Y7YnSJsaKaU5iMsQKCAQEA8fSb3wzW
+ANXwRP1SfQyQI8fkphhMIRLOjFoubZXNmlSJmZ2xN3nHjmOnEQqWjZfFvb9y6U9k
+4jZn+AOsG7oBzx5ojBnn9R8UhTt3N8wLpdo1zpGFhgo7QsIm8xTVZf/yVhZAESSP
+T6uPt63fV4dDxs2DkECCYtK5/SBrVjV9dSaNVrSakAMbHCyfDnB8We2TNArw46W2
+UIkKeJ+WyHrS9wj7/VusHU79w+7nC0wCEO2Zeg4FOPaB5602qTGKWSmneD8WbceN
+CzU3HOm4+MP9zOe3j3waUWhZn6hwlisoDGy5Pxjwxc+ZK9Mk75atBeTTPRk3WWZN
+2sdxfSvJryIICQKCAQEAzzU0jCRil7b/bHG/Kkxe6qDSGiWnr8hOJz9l1zdO+8qW
+LyYj9TexHmx3ptsArqvZx3n0bTbpAAX64PxclZ4GF2AYq0EtRMHpW8SfLN5v4VVu
+iojELASOp1B8k9lTryNMijYX2xzqqmAQAp7C7dEZKoJw0OwSjdeo0FWND7g0iQfP
+C38Mb9IMHCbsH5A3HMB/1BCvwcK5GDF59wNJMiNn/frXPCAklUtaUxybLZYlr4IG
+fE3KGGoyZ9TGgzrvhaZcm/aGqATghKvnu023Huf28n2RlTuzbpDQxXprc09juG6v
+ED/0aSb6CtwxnsSGTjI9jmD/B8M+NOjKCj5FHeoq+wKCAQEAzt8XznQJHZnN3w7A
+0o5FR3KHusOXg8eytin8/FFTA5s4MxGzDf2fc80ccOOeqd2UgpIITBdH6GogJtvs
+8Z/uIAqd1PvpaZlzUB7x5dEePGYAD2g4w8xHsuKRrtCnTJizuIs5p+6cYcAMFz4g
+cZQDjRz2WgXt9VS7FQP9Ai6l395ghjvQ00T0jvhP3maqy7DnZV3dbKao5N7nBw9c
+OnlZm0IhQhR5am25ZycCgJQySAr+aG9IY9XQaCefTrY+1dFZkORP5vjxTR3oy13C
+e2AHxoRwW8v+LJCN4IaCDMNFcKodESff5SGEkwXq1Lb9pMZrHhhLTLK96ksQa7de
+r9DnEQKCAQAsQESUmUCk/ePWmQQmWGD84pN8GgaRt8lZAGUTbTBk/YLNVuve9qHQ
+yYFUFPk0VGcR/q2AlXV1F9HYMozjNvg+Dbu29xhf0rmva3JC9s4V5VTSKRsb+20r
+tjikqokAoh06dwrpNGe5MwK4TId8mmcxeOH2/DOKL0BSHbMeZExz50MERYG2b7k+
+WeGbbAUZEtM5ysZ+LsOwswBFDGKZXo5a4zMMGCE38Pu9PCK3vZUWwkH4UfSS+jsB
+iaNLa06z0C9+70santAEBWJb0eEFrKxEZSSuJi3PswWi5t4DpjS5somYgtADlNtZ
+bmT6byXvlTMoMPbGBaBHVF+xiFLibJv5AoIBABthLq37v1Sg4rGZo2AR0Gg9WpKt
+WI7NO2oOLjmWBN+KsjWAyvEHTefDD1rxVwCAGRl8SCLodO+m6aaVKSiBcBlLf56u
+oEXq83Z0qSjMHDZstsZB3cnXa+xal1rIRQ78dLM+Kmzlct0uNd1HdrQVnjMwR9aB
+2fy/phTz8CUTPJy0XWOLTJqNJq5rCX8ysqEqmh2YKE2j1QJNmUp/xdVzSFU+G6Zz
+1TevyPG3p71KUhXd0Y6ImljHUo7hQXXYxLDjgxhJfP4aAgv+li/GerjhIyGpXDEC
+iQcBTRH6eZ9ka+Mn9tKz1W548XLac2NfZWtPojk+BzmYYr8NELI+AiA7b/g=
+-----END RSA PRIVATE KEY-----
diff --git a/_dev-env/test/irsa-tester.yml b/_dev-env/test/irsa-tester.yml
new file mode 100644
index 0000000..18bbd0f
--- /dev/null
+++ b/_dev-env/test/irsa-tester.yml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: irsa-tester-sa
+  annotations:
+    eks.amazonaws.com/role-arn: arn:aws:iam::000000000000:role/my-app-role
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    run: irsa-tester
+  name: irsa-tester
+spec:
+  serviceAccountName: irsa-tester-sa
+  containers:
+    - image: praqma/network-multitool
+      name: irsa-tester
+  restartPolicy: Always
+
diff --git a/_dev-env/test/policy.json b/_dev-env/test/policy.json
new file mode 100644
index 0000000..70a1618
--- /dev/null
+++ b/_dev-env/test/policy.json
@@ -0,0 +1,11 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "Stmt1608740634481",
+      "Action": ["s3:*", "iam:*"],
+      "Effect": "Allow",
+      "Resource": "arn:aws:s3:::irsa-test/*"
+    }
+  ]
+}
diff --git a/_dev-env/test/test-irsa.sh b/_dev-env/test/test-irsa.sh
new file mode 100644
index 0000000..4f2b88b
--- /dev/null
+++ b/_dev-env/test/test-irsa.sh
@@ -0,0 +1,28 @@
+echo "aws cli version :"
+aws --version
+
+echo
+echo "aws env vars :"
+env | grep AWS
+
+echo
+echo "get credentials"
+aws --endpoint-url=http://aws-local:4566 sts assume-role-with-web-identity --role-arn $AWS_ROLE_ARN --role-session-name $(head /dev/urandom | tr -dc a-z | head -c10) --web-identity-token file://$AWS_WEB_IDENTITY_TOKEN_FILE --duration-seconds 1000 > /tmp/my-creds
+
+
+echo
+echo "pass aws creds to env vars :"
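+# NB : jq is assumed to be available in the tester image ;
+# /tmp/my-creds holds the usual STS response shape : {"Credentials":{"AccessKeyId":...,"SecretAccessKey":...,"SessionToken":...}}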
+export AWS_ACCESS_KEY_ID=$(cat /tmp/my-creds | jq -r '.Credentials.AccessKeyId')
+export AWS_SECRET_ACCESS_KEY=$(cat /tmp/my-creds | jq -r '.Credentials.SecretAccessKey')
+export AWS_SESSION_TOKEN=$(cat /tmp/my-creds | jq -r '.Credentials.SessionToken')
+
+echo
+echo "listable s3 buckets :"
+aws --endpoint-url=http://aws-local:4566 s3 ls
+
+
+echo
+echo "should be forbidden according to role :"
+aws --endpoint-url=http://aws-local:4566 iam list-roles
diff --git a/_dev-env/test/trust-role.json b/_dev-env/test/trust-role.json
new file mode 100644
index 0000000..1bcd646
--- /dev/null
+++ b/_dev-env/test/trust-role.json
@@ -0,0 +1,19 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::000000000000:oidc-provider/hydra.local:4444"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "hydra.local:sub": [
+            "system:serviceaccount:default:irsa-tester-sa"
+          ]
+        }
+      }
+    }
+  ]
+}
diff --git a/_dev-env/wait-for-localstack.sh b/_dev-env/wait-for-localstack.sh
new file mode 100755
index 0000000..d67f40a
--- /dev/null
+++ b/_dev-env/wait-for-localstack.sh
@@ -0,0 +1,3 @@
+until AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test AWS_REGION=us-east-1 aws --no-cli-pager --endpoint-url=http://localhost:4566 sts get-caller-identity > /dev/null 2>&1; do
+  sleep 1
+done
diff --git a/api/v1alpha1/shared_types.go b/api/v1alpha1/shared_types.go
index d04d134..060fdee 100644
--- a/api/v1alpha1/shared_types.go
+++ b/api/v1alpha1/shared_types.go
@@ -4,11 +4,11 @@ package v1alpha1
 type CrCondition string
 
 var (
-	CrSubmitted CrCondition = ""
-	CrPending   CrCondition = "pending"
-	CrForbidden CrCondition = "forbidden"
-	CrFailed    CrCondition = "failed"
-	CrOK        CrCondition = "created"
+	CrSubmitted   CrCondition = ""
+	CrPending     CrCondition = "pending"
+	CrProgressing CrCondition = "progressing"
+	CrError       CrCondition = "error"
+	CrOK          CrCondition = "created"
 )
 
 func (i CrCondition) String() string {
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 72b8cb5..604993e 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -8,3 +8,9 @@ configMapGenerator:
 - files:
   - controller_manager_config.yaml
   name: manager-config
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+images:
+- name: controller
+  newName: localhost:5000/irsa-operator
+  newTag: latest
diff --git a/controllers/policy_controller.go b/controllers/policy_controller.go
index b03d50f..fdb73ef 100644
--- a/controllers/policy_controller.go
+++ b/controllers/policy_controller.go
@@ -61,7 +61,7 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	{ // finalizer registration & execution
 		if policy.IsPendingDeletion() {
-			if ok := r.executeFinalizerIfPresent(policy); !ok {
+			if ok := r.executeFinalizerIfPresent(ctx, policy); !ok {
 				return ctrl.Result{Requeue: true}, nil
 			}
 			// ok, no requeue
@@ -98,104 +98,90 @@ func (r *PolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
 
 // admissionStep does spec validation
 func (r *PolicyReconciler) admissionStep(ctx context.Context, policy *api.Policy) (ctrl.Result, error) {
-	r.log.Info("handling submitted IamPolicy (checking values, setting defaults)")
-
-	if err := policy.Validate(r.clusterName); err != nil { // the policy spec is invalid
-		r.log.Info("invalid spec, passing status to failed")
-		if err := r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrFailed, Reason: err.Error()}); err != nil {
-			return ctrl.Result{}, err
-		}
-		return ctrl.Result{}, nil
+	if err := policy.Validate(r.clusterName); err != nil { // the policy spec is not valid
+		ok := r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrError, Reason: err.Error()})
+		return ctrl.Result{Requeue: !ok}, nil
 	}
 
 	// update the role to "pending"
-	if err := r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrPending, Reason: "passed validation"}); err != nil {
-		return ctrl.Result{}, err
-	}
-
-	return ctrl.Result{}, nil
+	ok := r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrProgressing, Reason: "passed validation"})
+	return ctrl.Result{Requeue: !ok}, nil
 }
 
 // reconcilerRoutine is an infinite loop attempting to make the aws IAM policy converge to the policy.Spec
 func (r *PolicyReconciler) reconcilerRoutine(ctx context.Context, policy *api.Policy) (ctrl.Result, error) {
-	r.log.Info("reconciler routine")
-
 	if policy.Spec.ARN == "" { // no arn in spec, if we find it on aws : we set the spec, otherwise : we create the AWS policy
 		foundARN, err := r.awsPM.GetPolicyARN(policy.PathPrefix(r.clusterName), policy.AwsName(r.clusterName))
 		if err != nil {
-			r.logExtErr(err, "failed while attempting to find policy on aws")
+			r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrError, Reason: err.Error()})
 			return ctrl.Result{Requeue: true}, nil
 		}
 
-		if foundARN == "" { // no policy on aws
-			if err := r.awsPM.CreatePolicy(*policy); err != nil { // we create it
-				r.logExtErr(err, "failed to create policy on aws")
-				return ctrl.Result{Requeue: true}, nil
+		if foundARN == "" { // no policy on aws, we create it
+			if err := r.awsPM.CreatePolicy(*policy); err != nil { // creation failed
+				r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrError, Reason: "failed to create policy on AWS : " + err.Error()})
+			} else { // creation succeeded
+				r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrProgressing, Reason: "policy created on AWS"})
 			}
+			return ctrl.Result{Requeue: true}, nil
 		} else { // a policy already exists on aws
 			if ok := r.setPolicyArnField(ctx, foundARN, policy); !ok { // we set the policyARN field
-				return ctrl.Result{Requeue: true}, nil
+				return ctrl.Result{}, nil // modifying the policyARN field will generate a new event
 			}
 		}
 	} else { // policy arn in spec, we may have to update it on aws
 		policyStatement, err := r.awsPM.GetStatement(policy.Spec.ARN)
 		if err != nil {
+			r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrError, Reason: "get policyStatement on AWS failed : " + err.Error()})
 			return ctrl.Result{Requeue: true}, nil
 		}
 
 		if !api.StatementEquals(policy.Spec.Statement, policyStatement) { // policy on aws doesn't correspond to the one in Spec
 			// we update the aws policy
 			if err := r.awsPM.UpdatePolicy(*policy); err != nil {
+				r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrError, Reason: "update policyStatement on AWS failed : " + err.Error()})
 				return ctrl.Result{Requeue: true}, nil
+			} else {
+				r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrProgressing, Reason: "update policyStatement on AWS succeeded"})
 			}
 		}
 	}
 
 	if policy.Status.Condition != api.CrOK {
-		r.log.Info("passing policy status to OK")
-		_ = r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrOK})
+		r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrOK})
 	}
 	return ctrl.Result{}, nil
 }
 
-func (r *PolicyReconciler) executeFinalizerIfPresent(policy *api.Policy) completed {
+func (r *PolicyReconciler) executeFinalizerIfPresent(ctx context.Context, policy *api.Policy) completed {
 	if !containsString(policy.ObjectMeta.Finalizers, r.finalizerID) { // no finalizer to execute
 		return true
 	}
 
-	r.log.Info("executing finalizer : deleting policy on aws")
-
 	arn, err := r.awsPM.GetPolicyARN(policy.PathPrefix(r.clusterName), policy.AwsName(r.clusterName))
 	if err != nil {
-		if !k8serrors.IsNotFound(err) {
-			r.logExtErr(err, "failed to get policy arn")
-			return false
-		} else {
-			return true
-		}
+		r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrError, Reason: "get Policy on AWS failed : " + err.Error()})
+		return false
 	}
 
-	if arn == "" {
-		// already deleted
+	if arn == "" { // already deleted
 		return true
 	}
 
-	// policy found on aws
-	if err := r.awsPM.DeletePolicy(arn); err != nil {
-		// it failed for any reason, we requeue
-		r.logExtErr(err, "failed to delete policy on aws")
+	// policy found on aws, we delete it
+
+	if err := r.awsPM.DeletePolicy(arn); err != nil { // deletion failed
+		r.updateStatus(ctx, policy, api.PolicyStatus{Condition: api.CrError, Reason: "delete Policy on AWS failed : " + err.Error()})
 		return false
 	}
 
-	r.log.Info("deleting policy")
 	// let's delete the policy itself
 	if err := r.Delete(context.TODO(), policy); err != nil {
 		if !k8serrors.IsNotFound(err) {
-			r.logExtErr(err, "delete policy failed : "+policy.ObjectMeta.GetName())
			return false
 		}
 	}
 
-	r.log.Info("policy deleted")
 	// it succeeded
 	// we remove our finalizer from the list and update it.
@@ -208,9 +194,9 @@ func (r *PolicyReconciler) executeFinalizerIfPresent(policy *api.Policy) completed {
 }
 
 // helper function to update a Policy status
-func (r *PolicyReconciler) updateStatus(ctx context.Context, Policy *api.Policy, status api.PolicyStatus) error {
+func (r *PolicyReconciler) updateStatus(ctx context.Context, Policy *api.Policy, status api.PolicyStatus) bool {
 	Policy.Status = status
-	return r.Status().Update(ctx, Policy)
+	return r.Status().Update(ctx, Policy) == nil
 }
 
 func (r *PolicyReconciler) registerFinalizerIfNeeded(role *api.Policy) completed {
diff --git a/controllers/role_controller.go b/controllers/role_controller.go
index 24db361..3bab4f4 100644
--- a/controllers/role_controller.go
+++ b/controllers/role_controller.go
@@ -51,8 +51,6 @@ type RoleReconciler struct {
 // +kubebuilder:rbac:groups=irsa.voodoo.io,resources=roles/finalizers,verbs=update
 
 func (r *RoleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	_ = r.log.WithValues("role", req.NamespacedName)
-
 	var role *api.Role
 	{ // extract role from the request
 		var ok completed
@@ -106,33 +104,22 @@ func (r *RoleReconciler) SetupWithManager(mgr ctrl.Manager) error {
 
 // admissionStep does spec validation
 func (r *RoleReconciler) admissionStep(ctx context.Context, role *api.Role) (ctrl.Result, error) {
-	r.log.Info("admissionStep")
-
 	if err := role.Validate(r.clusterName); err != nil { // the role spec is invalid
-		r.log.Info("invalid spec, passing status to failed")
-		if err := r.updateStatus(ctx, role, api.RoleStatus{Condition: api.CrFailed, Reason: err.Error()}); err != nil {
-			return ctrl.Result{}, err
-		}
-		return ctrl.Result{}, nil
+		ok := r.updateStatus(ctx, role, api.RoleStatus{Condition: api.CrError, Reason: err.Error()})
+		return ctrl.Result{Requeue: !ok}, nil
 	}
 
 	// update the role to "pending"
-	if err := r.updateStatus(ctx, role, api.RoleStatus{Condition: api.CrPending, Reason: "passed validation"}); err != nil {
-		return ctrl.Result{}, err
-	}
-
-	r.log.Info("successfully set role status to pending")
-	return ctrl.Result{}, nil
+	ok := r.updateStatus(ctx, role, api.RoleStatus{Condition: api.CrProgressing, Reason: "passed validation"})
+	return ctrl.Result{Requeue: !ok}, nil
 }
 
 // reconcilerRoutine is an infinite loop attempting to make the aws IAM role, with it's attachment converge to the role.Spec
 func (r *RoleReconciler) reconcilerRoutine(ctx context.Context, role *api.Role) (ctrl.Result, error) {
-	r.log.Info("reconciler routine")
-
 	if role.Spec.RoleARN == "" { // no arn in spec, if we find it on aws : we set the spec, otherwise : we create the AWS role
 		roleExistsOnAws, err := r.awsRM.RoleExists(role.AwsName(r.clusterName))
 		if err != nil {
-			_ = r.updateStatus(ctx, role, api.RoleStatus{Condition: role.Status.Condition, Reason: "failed to check if role exists on AWS"})
+			r.updateStatus(ctx, role, api.RoleStatus{Condition: api.CrError, Reason: "failed to check if role exists on AWS"})
 			return ctrl.Result{Requeue: true}, nil
 		}
@@ -358,9 +345,9 @@ func (r *RoleReconciler) getRoleFromReq(ctx context.Context, req ctrl.Request) (
 }
 
 // helper function to update a Role status
-func (r *RoleReconciler) updateStatus(ctx context.Context, role *api.Role, status api.RoleStatus) error {
+func (r *RoleReconciler) updateStatus(ctx context.Context, role *api.Role, status api.RoleStatus) bool {
 	role.Status = status
-	return r.Status().Update(ctx, role)
+	return r.Status().Update(ctx, role) == nil
 }
 
 func (r *RoleReconciler) addEvent(role *api.Role, e Event) {
diff --git a/shell.nix b/shell.nix
index 56825e6..948e816 100644
--- a/shell.nix
+++ b/shell.nix
@@ -15,6 +15,9 @@ let
     url = "git@github.com:VoodooTeam/nix-pkgs.git";
     ref = "master";
   }) stable;
+
+  unstable = import (builtins.fetchTarball https://nixos.org/channels/nixos-unstable/nixexprs.tar.xz) {};
+
 in
 
 stable.mkShell {
@@ -25,6 +28,7 @@ let
     nightly.gopls
     nightly.asmfmt
     nightly.errcheck
+    unstable.awscli2
     # operator-sdk cli
     voodoo.operator-sdk_1_3_0