diff --git a/charts/controller/templates/_helpers.tpl b/charts/controller/templates/_helpers.tpl index 62f2d8781..e9929922b 100644 --- a/charts/controller/templates/_helpers.tpl +++ b/charts/controller/templates/_helpers.tpl @@ -3,9 +3,9 @@ env: - name: REGISTRATION_MODE value: {{ .Values.registrationMode }} -# Environmental variable value for $INGRESS_CLASS -- name: "DRYCC_INGRESS_CLASS" - value: "{{ .Values.global.ingressClass }}" +# Environmental variable value for $GATEWAY_CLASS +- name: "DRYCC_GATEWAY_CLASS" + value: "{{ .Values.global.gatewayClass }}" - name: "DRYCC_PLATFORM_DOMAIN" value: "{{ .Values.global.platformDomain }}" - name: "K8S_API_VERIFY_TLS" diff --git a/charts/controller/templates/controller-certificate.yaml b/charts/controller/templates/controller-certificate.yaml deleted file mode 100644 index 932092632..000000000 --- a/charts/controller/templates/controller-certificate.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.global.certManagerEnabled }} -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: drycc-controller -spec: - secretName: drycc-controller-auto-tls - issuerRef: - name: drycc-cluster-issuer - kind: ClusterIssuer - dnsNames: - - drycc.{{ .Values.global.platformDomain }} - privateKey: - rotationPolicy: Always -{{- end }} diff --git a/charts/controller/templates/controller-clusterrole.yaml b/charts/controller/templates/controller-clusterrole.yaml index 79f3a66b0..153793793 100644 --- a/charts/controller/templates/controller-clusterrole.yaml +++ b/charts/controller/templates/controller-clusterrole.yaml @@ -12,7 +12,7 @@ rules: verbs: ["get", "list", "create", "delete"] - apiGroups: [""] resources: ["services"] - verbs: ["get", "list", "create", "update", "delete"] + verbs: ["get", "list", "create", "patch", "update", "delete"] - apiGroups: [""] resources: ["nodes"] verbs: ["get", "list"] @@ -21,7 +21,7 @@ rules: verbs: ["list", "create"] - apiGroups: [""] resources: ["secrets"] - verbs: ["list", "get", "create", 
"update", "delete"] + verbs: ["list", "get", "create", "patch", "update", "delete"] - apiGroups: [""] resources: ["replicationcontrollers"] verbs: ["get", "list", "create", "update", "delete"] @@ -79,7 +79,8 @@ rules: - apiGroups: ["servicecatalog.k8s.io"] resources: ["serviceinstances", "servicebindings"] verbs: ["get", "list", "watch", "create", "delete", "patch", "update"] -- apiGroups: ["traefik.containo.us"] - resources: ["middlewares"] - verbs: ["get", "list", "watch", "create", "delete", "patch", "update"] +- apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways", "httproutes", "grocroutes", "tcproutes", "udproutes", "tlsroutes"] + verbs: ["get", "patch", "list", "create", "update", "delete"] {{- end -}} + diff --git a/charts/controller/templates/controller-ingress.yaml b/charts/controller/templates/controller-ingress.yaml deleted file mode 100644 index 4b2024e3b..000000000 --- a/charts/controller/templates/controller-ingress.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: "controller-api-server" - labels: - app: "controller" - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" - annotations: - kubernetes.io/tls-acme: "true" -spec: - {{- if not (eq .Values.global.ingressClass "") }} - ingressClassName: "{{ .Values.global.ingressClass }}" - {{- end }} - rules: - - host: drycc.{{ .Values.global.platformDomain }} - http: - paths: - - pathType: Prefix - {{- if eq .Values.global.ingressClass "gce" "alb" }} - path: /* - {{- else }}{{/* Has annotations but ingress class is not "gce" nor "alb" */}} - path: / - {{- end }} - backend: - service: - name: drycc-controller-api - port: - number: 80 - {{- if .Values.global.certManagerEnabled }} - tls: - - secretName: drycc-controller-auto-tls - hosts: - - drycc.{{ .Values.global.platformDomain }} - {{- end }} diff --git a/charts/controller/templates/controller-route.yaml 
b/charts/controller/templates/controller-route.yaml new file mode 100644 index 000000000..e2d0aef5f --- /dev/null +++ b/charts/controller/templates/controller-route.yaml @@ -0,0 +1,32 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: controller-api-server + labels: + app: "controller" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + hostnames: + - drycc.{{ .Values.global.platformDomain }} + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: drycc-gateway + sectionName: drycc-gateway-listener-http +{{- if .Values.global.certManagerEnabled }} + - group: gateway.networking.k8s.io + kind: Gateway + name: drycc-gateway + sectionName: drycc-gateway-listener-https +{{- end }} + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - name: drycc-controller-api + port: 80 + diff --git a/charts/controller/values.yaml b/charts/controller/values.yaml index 1148978a4..a471b96be 100644 --- a/charts/controller/values.yaml +++ b/charts/controller/values.yaml @@ -163,6 +163,8 @@ global: registrySecretPrefix: "private-registry" # Role-Based Access Control for Kubernetes >= 1.5 rbac: false + # GatewayClass is cluster-scoped resource defined by the infrastructure provider. + gatewayClass: "" # Please check `kubernetes.io/ingress.class` ingressClass: "" # A domain name consists of one or more parts. diff --git a/rootfs/api/exceptions.py b/rootfs/api/exceptions.py index 07cf20dec..0c20a5158 100644 --- a/rootfs/api/exceptions.py +++ b/rootfs/api/exceptions.py @@ -36,7 +36,6 @@ def custom_exception_handler(exc, context): if isinstance(exc, Http404): set_rollback() return Response(str(exc), status=status.HTTP_404_NOT_FOUND) - # Call REST framework's default exception handler after specific 404 handling, # to get the standard error response. 
response = exception_handler(exc, context) diff --git a/rootfs/api/fixtures/test_sharing.json b/rootfs/api/fixtures/test_sharing.json index 4483d9f4b..eba4305d5 100644 --- a/rootfs/api/fixtures/test_sharing.json +++ b/rootfs/api/fixtures/test_sharing.json @@ -13,7 +13,7 @@ "groups": [], "user_permissions": [], "password": "pbkdf2_sha256$10000$SLr4X1T9L3QA$NB4d4a0d+3NZuAwLbdnKGb2z3P/hQrKQHVaGG3zAaMw=", - "email": "autotest@drycc.cc", + "email": "autotest-1@drycc.cc", "date_joined": "2013-11-25T21:58:46.208Z" } }, @@ -31,7 +31,7 @@ "groups": [], "user_permissions": [], "password": "pbkdf2_sha256$10000$FrfwTVAtWPMD$HUfDokMeY37YshdyS3uhDZ+d/r8galU7kNuBfZxJl2s=", - "email": "autotest@drycc.cc", + "email": "autotest-2@drycc.cc", "date_joined": "2013-11-25T21:59:30.760Z" } }, @@ -49,7 +49,7 @@ "groups": [], "user_permissions": [], "password": "pbkdf2_sha256$10000$FrfwTVAtWPMD$HUfDokMeY37YshdyS3uhDZ+d/r8galU7kNuBfZxJl2s=", - "email": "autotest@drycc.cc", + "email": "autotest-3@drycc.cc", "date_joined": "2013-11-25T21:59:30.760Z" } }, diff --git a/rootfs/api/migrations/0001_initial.py b/rootfs/api/migrations/0001_initial.py index 6e1361569..11b3015bf 100644 --- a/rootfs/api/migrations/0001_initial.py +++ b/rootfs/api/migrations/0001_initial.py @@ -1,4 +1,4 @@ -# Generated by Django 4.1.7 on 2023-03-17 08:18 +# Generated by Django 4.1.7 on 2023-05-05 03:32 import api.models.app import api.models.certificate @@ -185,6 +185,7 @@ class Migration(migrations.Migration): ('uuid', models.UUIDField(auto_created=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True, verbose_name='UUID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), + ('issuer', models.JSONField(default={'email': 'anonymous@cert-manager.io', 'key_id': '', 'key_secret': '', 'server': 'https://acme-v02.api.letsencrypt.org/directory'})), ('https_enforced', models.BooleanField(null=True)), ('certs_auto_enabled', 
models.BooleanField(null=True)), ('app', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.app')), @@ -202,8 +203,7 @@ class Migration(migrations.Migration): ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), - ('port', models.PositiveIntegerField(default=5000)), - ('protocol', models.TextField(default='TCP')), + ('ports', models.JSONField(default=list)), ('procfile_type', models.TextField()), ('app', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.app')), ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), @@ -214,6 +214,28 @@ class Migration(migrations.Migration): 'unique_together': {('app', 'procfile_type')}, }, ), + migrations.CreateModel( + name='Route', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created', models.DateTimeField(auto_now_add=True)), + ('updated', models.DateTimeField(auto_now=True)), + ('kind', models.CharField(choices=[('TLSRoute', 'TCP'), ('TCPRoute', 'TCP'), ('UDPRoute', 'UDP'), ('GRPCRoute', 'HTTPS'), ('HTTPRoute', 'HTTP/HTTPS')], max_length=15)), + ('name', models.CharField(db_index=True, max_length=63)), + ('port', models.PositiveIntegerField()), + ('rules', models.JSONField(default=list)), + ('routable', models.BooleanField(default=True)), + ('parent_refs', models.JSONField(default=list)), + ('procfile_type', models.TextField()), + ('app', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.app')), + ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), + ], + options={ + 'ordering': ['-created'], + 'get_latest_by': 'created', + 'unique_together': {('app', 'name')}, + }, + ), migrations.CreateModel( name='Resource', fields=[ @@ -273,13 +295,30 @@ 
class Migration(migrations.Migration): 'unique_together': {('owner', 'fingerprint')}, }, ), + migrations.CreateModel( + name='Gateway', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created', models.DateTimeField(auto_now_add=True)), + ('updated', models.DateTimeField(auto_now=True)), + ('name', models.CharField(db_index=True, max_length=63)), + ('ports', models.JSONField(default=list)), + ('app', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.app')), + ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), + ], + options={ + 'ordering': ['-created'], + 'get_latest_by': 'created', + 'unique_together': {('app', 'name')}, + }, + ), migrations.CreateModel( name='AppSettings', fields=[ ('uuid', models.UUIDField(auto_created=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True, verbose_name='UUID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), - ('routable', models.BooleanField(null=True)), + ('routable', models.BooleanField(default=True)), ('autoscale', models.JSONField(blank=True, default=dict)), ('label', models.JSONField(blank=True, default=dict)), ('app', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.app')), @@ -292,3 +331,4 @@ class Migration(migrations.Migration): }, ), ] + diff --git a/rootfs/api/models/app.py b/rootfs/api/models/app.py index 0eb9f34a8..08fc0f230 100644 --- a/rootfs/api/models/app.py +++ b/rootfs/api/models/app.py @@ -12,6 +12,8 @@ import requests import string import time +import socket +from contextlib import closing from itertools import groupby from urllib.parse import urljoin @@ -384,95 +386,29 @@ def deploy(self, release, force_deploy=False, rollback_on_failure=True): # noqa err = '(app::deploy): {}'.format(e) self.log(err, logging.ERROR) raise ServiceUnavailable(err) from e - 
self._update_default_deploys(release, deploys) + for procfile_type, value in deploys.items(): + if procfile_type in ("web", "cmd"): # http + target_port = int(value.get('envs', {}).get('PORT', 5000)) + self._create_default_ingress(procfile_type, target_port) + service = self.service_set.filter(procfile_type=procfile_type).first() + if not service: + continue + old = release.previous() + if old and old.build: + continue + if procfile_type in ("web", "cmd"): + self._verify_http_health(service, **deploys[procfile_type]) + else: + self._verify_tcp_health(service, **deploys[procfile_type]) # cleanup old release objects from kubernetes release.cleanup_old() - def verify_application_health(self, **kwargs): - """ - Verify an application is healthy via the router. - This is only used in conjunction with the kubernetes health check system and should - only run after kubernetes has reported all pods as healthy - """ - # Bail out early if the application is not routable - app_settings = self.appsettings_set.latest() - if not kwargs.get('routable', False) and app_settings.routable: - return - - app_type = kwargs.get('app_type') - self.log( - 'Waiting for router to be ready to serve traffic to process type {}'.format(app_type), - level=logging.DEBUG - ) - - # Get the router host and append healthcheck path - url = 'http://{}:{}'.format(settings.ROUTER_HOST, settings.ROUTER_PORT) - - # if a httpGet probe is available then 200 is the only acceptable status code - if ('livenessProbe' in kwargs.get('healthcheck', {}) and - 'httpGet' in kwargs['healthcheck']['livenessProbe']): - allowed = [200] - handler = kwargs['healthcheck']['livenessProbe']['httpGet'] - url = urljoin(url, handler.get('path', '/')) - req_timeout = handler.get('timeoutSeconds', 1) - else: - allowed = set(range(200, 599)) - allowed.remove(404) - req_timeout = 3 - - # Give the router max of 10 tries or max 30 seconds to become healthy - # Uses time module to account for the timeout value of 3 seconds - start = 
time.time() - failed = False - headers = { - # set the Host header for the application being checked - not used for actual routing - 'Host': '{}.{}.nip.io'.format(self.id, settings.ROUTER_HOST), - } - for _ in range(10): - try: - # http://docs.python-requests.org/en/master/user/advanced/#timeouts - response = get_session().get(url, timeout=req_timeout, headers=headers) - failed = False - except requests.exceptions.RequestException: - # In case of a failure where response object is not available - failed = True - # We are fine with timeouts and request problems, lets keep trying - time.sleep(1) # just a bit of a buffer - continue - - # 30 second timeout (timeout per request * 10) - if (time.time() - start) > (req_timeout * 10): - break - - # check response against the allowed pool - if response.status_code in allowed: - break - - # a small sleep since router usually resolve within 10 seconds - time.sleep(1) - - # Endpoint did not report healthy in time - if ('response' in locals() and response.status_code == 404) or failed: - # bankers rounding - delta = round(time.time() - start) - self.log( - 'Router was not ready to serve traffic to process type {} in time, waited {} seconds'.format(app_type, delta), # noqa - level=logging.WARNING - ) - return - - self.log( - 'Router is ready to serve traffic to process type {}'.format(app_type), - level=logging.DEBUG - ) - @backoff.on_exception(backoff.expo, ServiceUnavailable, max_tries=3) def logs(self, log_lines=str(settings.LOG_LINES)): """Return aggregated log data for this application.""" + url = "http://{}:{}/logs/{}?log_lines={}".format( + settings.LOGGER_HOST, settings.LOGGER_PORT, self.id, log_lines) try: - url = "http://{}:{}/logs/{}?log_lines={}".format(settings.LOGGER_HOST, - settings.LOGGER_PORT, - self.id, log_lines) r = requests.get(url) # Handle HTTP request errors except requests.exceptions.RequestException as e: @@ -808,43 +744,111 @@ def _set_default_config(self): config.memory = new_memory config.save() - 
def _create_default_ingress(self, service): + def _create_default_ingress(self, procfile_type, target_port): port = 80 - gateway, created = Gateway.objects.get_or_create(app=self, name=self.id, owner=self.owner) - if created: - gateway.add(port, "HTTP") + # create default service + try: + service = self.service_set.filter(procfile_type=procfile_type).latest() + except Service.DoesNotExist: + service = Service(owner=self.owner, app=self, procfile_type=procfile_type) + service.add_port(port, "TCP", target_port) + service.save() + # create default gateway + try: + gateway = self.gateway_set.filter(name=self.id).latest() + except Gateway.DoesNotExist: + gateway = Gateway(app=self, owner=self.owner, name=self.id) + added, msg = gateway.add(port, "HTTP") + if not added: + raise DryccException(msg) gateway.save() - route, created = Route.objects.get_or_create( - app=self, owner=self.owner, kind="HTTPRoute", name=self.id, - defaults={"port": port, "procfile_type": service.procfile_type} - ) - if created: - route.rules = {"backendRefs": route.get_backend_refs()} + # create default route + try: + self.route_set.filter(name=self.id).latest() + except Route.DoesNotExist: + route = Route(app=self, owner=self.owner, kind="HTTPRoute", name=self.id, + port=port, procfile_type=service.procfile_type) + route.rules = route.default_rules attached, msg = route.attach(gateway.name, port) if not attached: raise DryccException(msg) route.save() - def _update_default_deploys(self, release, deploys): - # Create or update service - for procfile_type, value in deploys.items(): - if procfile_type in ("web", "cmd"): # http - port = 80 - target_port = value.get('envs', {}).get('PORT', 5000) - service, created = Service.objects.get_or_create( - owner=self.owner, app=self, procfile_type=procfile_type, - ) - if created: - service.add_port(port, "TCP", target_port) - service.save() + def _verify_http_health(self, service, **kwargs): + """ + Verify an application is healthy via the svc. 
+ This is only used in conjunction with the kubernetes health check system and should + only run after kubernetes has reported all pods as healthy + """ + + app_type = kwargs.get('app_type') + self.log( + 'Waiting for service to be ready to serve traffic to process type {}'.format(app_type), + level=logging.DEBUG + ) + url = 'http://{}:{}'.format(service.domain, service.ports[0]["port"]) + # if a httpGet probe is available then 200 is the only acceptable status code + if ('livenessProbe' in kwargs.get('healthcheck', {}) and + 'httpGet' in kwargs['healthcheck']['livenessProbe']): + allowed = [200] + handler = kwargs['healthcheck']['livenessProbe']['httpGet'] + url = urljoin(url, handler.get('path', '/')) + req_timeout = handler.get('timeoutSeconds', 1) + else: + allowed = set(range(200, 599)) + allowed.remove(404) + req_timeout = 3 + # Give the svc max of 10 tries or max 30 seconds to become healthy + # Uses time module to account for the timeout value of 3 seconds + start = time.time() + failed = False + response = None + for _ in range(10): + try: + # http://docs.python-requests.org/en/master/user/advanced/#timeouts + response = get_session().get(url, timeout=req_timeout) + failed = False + except requests.exceptions.RequestException: + # In case of a failure where response object is not available + failed = True + # We are fine with timeouts and request problems, lets keep trying + time.sleep(1) # just a bit of a buffer + continue + + # 30 second timeout (timeout per request * 10) + if (time.time() - start) > (req_timeout * 10): + break + + # check response against the allowed pool + if response.status_code in allowed: + break + + # a small sleep since router usually resolve within 10 seconds + time.sleep(1) + + # Endpoint did not report healthy in time + if (response and response.status_code == 404) or failed: + # bankers rounding + delta = round(time.time() - start) + self.log( + 'Router was not ready to serve traffic to process type {} in time, waited {} 
seconds'.format(app_type, delta), # noqa + level=logging.WARNING + ) + return + + self.log( + 'Router is ready to serve traffic to process type {}'.format(app_type), + level=logging.DEBUG + ) + + def _verify_tcp_health(self, service, **kwargs): + for _ in range(10): + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: + sock.settimeout(3) + if sock.connect_ex((service.domain, service.ports[0]["port"])) == 0: + break else: - service.refresh_k8s_svc() - self._create_default_ingress(service) - # Wait until application is available in the router - # Only run when there is no previous build / release - old = release.previous() - if old is None or old.build is None: - self.verify_application_health(**deploys[procfile_type]) + time.sleep(3) def _check_deployment_in_progress(self, deploys, force_deploy=False): if force_deploy: diff --git a/rootfs/api/models/gateway.py b/rootfs/api/models/gateway.py index ce17486a1..7687bed4b 100644 --- a/rootfs/api/models/gateway.py +++ b/rootfs/api/models/gateway.py @@ -1,5 +1,6 @@ import logging from django.db import models +from django.conf import settings from django.shortcuts import get_object_or_404 from django.contrib.auth import get_user_model @@ -16,24 +17,24 @@ class Gateway(AuditedModel): app = models.ForeignKey('App', on_delete=models.CASCADE) owner = models.ForeignKey(User, on_delete=models.PROTECT) name = models.CharField(max_length=63, db_index=True) - listeners = models.JSONField(default=dict) + ports = models.JSONField(default=list) def _check_port(self, port, protocol): - for listener in self.listeners: - if listener.port == port: - if (listener["protocol"] == protocol) or ( - listener["protocol"] != "UDP" and protocol != "UDP"): + for item in self.ports: + if item["port"] == port: + if (item["protocol"] == protocol) or ( + item["protocol"] != "UDP" and protocol != "UDP"): return False return True def _get_tls_domain(self, auto_tls): - domains = self.app.domain_set + domains = 
self.app.domain_set.all() if not auto_tls: domains = domains.exclude(certificate=None) return domains def _get_listener_name(self, port, protocol, suffix=None): - names = [self.app.id, port] + names = [self.app.id, str(port)] if protocol in ("TCP", "TLS", "HTTP"): names.append("TCP") elif protocol in ("HTTPS", ): @@ -47,60 +48,70 @@ def _get_listener_name(self, port, protocol, suffix=None): def add(self, port, protocol): # check port if not self._check_port(port, protocol): - return False, "port is occupied" + return False, {"detail": "port is occupied"} + self.ports.append({"port": port, "protocol": protocol}) + return True, None + + def remove(self, port, protocol): + ports = [] + for item in self.ports: + if item["port"] != port or item["protocol"] != protocol: + ports.append(item) + if len(ports) == len(self.ports): + return False, {"detail": "no matching listener exists"} + self.ports = ports + return True, "" + + @property + def listeners(self): listeners = [] auto_tls = self.app.tls_set.latest().certs_auto_enabled - if protocol in ("TLS", "HTTPS"): - for domain in self._get_tls_domain(auto_tls): - secret_name = f"{self.app.id}-auto-tls" if auto_tls else domain.certificate.name + domains = list(self._get_tls_domain(auto_tls)) + for item in self.ports: + port, protocol = item["port"], item["protocol"] + if item["protocol"] in ("TLS", "HTTPS"): + for domain in domains: + secret_name = (f"{self.app.id}-auto-tls" if + auto_tls else domain.certificate.name) + listeners.append({ + "allowedRoutes": {"namespaces": {"from": "All"}}, + "name": self._get_listener_name(port, protocol, domain.domain), + "port": port, + "hostname": domain.domain, + "protocol": protocol, + "tls": {"certificateRefs": [{"kind": "Secret", "name": secret_name}]}, + }) + else: listeners.append({ "allowedRoutes": {"namespaces": {"from": "All"}}, - "name": self._get_listener_name(port, protocol, domain.domain), + "name": self._get_listener_name(port, protocol), "port": port, - "hostname": 
domain.domain, "protocol": protocol, - "tls": {"certificateRefs": [{"kind": "Secret", "name": secret_name}]}, }) - if len(listeners) == 0: - return False, "no matching certificate exists" - else: - listeners.append({ - "allowedRoutes": {"namespaces": {"from": "All"}}, - "name": self._get_listener_name(port, protocol), - "port": port, - "protocol": protocol, - }) - self.listeners.extend(listeners) - return True, None - - def remove(self, port, protocol): - listeners = [] - for listener in self.listeners: - if listener["port"] == port and listener["protocol"] == protocol: - for route in self.app.route_set: - for parent_ref in route.parent_refs: - if (parent_ref["name"] == self.name - and parent_ref["sectionName"] == listener["name"]): - return False, "cannot delete a referenced listener" - else: - listeners.append(listener) - if len(listeners) == len(self.listeners): - return False, "no matching listener exists" - self.listeners = listeners - return True, "" + return listeners def refresh_to_k8s(self): try: try: - data = self._scheduler.gateway.get(self.app.id, self.name).json() - self._scheduler.gateway.patch(self.app.id, self.name, **{ - "listeners": self.listeners, - "version": data["metadata"]["resourceVersion"], - }) + data = self._scheduler.gateways.get(self.app.id, self.name).json() + if len(self.listeners) > 0: + self._scheduler.gateways.patch(self.app.id, self.name, **{ + "listeners": self.listeners, + "gateway_class": settings.GATEWAY_CLASS, + "version": data["metadata"]["resourceVersion"], + }) + else: + logger.debug("delete k8s resource when listeners are empty") + self._scheduler.gateways.delete( + self.app.id, self.name, ignore_exception=True) except KubeException: - self._scheduler.gateway.create(self.app.id, self.name, **{ - "listeners": self.listeners, - }) + if len(self.listeners) > 0: + self._scheduler.gateways.create(self.app.id, self.name, **{ + "listeners": self.listeners, + "gateway_class": settings.GATEWAY_CLASS, + }) + else: + 
logger.debug("skip creating k8s resource when listeners are empty") except KubeException as e: raise ServiceUnavailable('Kubernetes gateway could not be created') from e @@ -110,7 +121,7 @@ def save(self, *args, **kwargs): def delete(self, *args, **kwargs): try: - self._scheduler.gateway.delete(self.app.id, self.name, ignore_exception=False) + self._scheduler.gateways.delete(self.app.id, self.name, ignore_exception=False) except KubeException: logger.log( msg='Kubernetes gateway cannot be deleted: {}'.format(self.name), @@ -139,9 +150,9 @@ class Route(AuditedModel): (key, '/'.join(value)) for key, value in PROTOCOLS_CHOICES.items()]) name = models.CharField(max_length=63, db_index=True) port = models.PositiveIntegerField() - rules = models.JSONField(default=dict) + rules = models.JSONField(default=list) routable = models.BooleanField(default=True) - parent_refs = models.JSONField(default=dict) + parent_refs = models.JSONField(default=list) procfile_type = models.TextField() @property @@ -150,49 +161,22 @@ def protocols(self): raise NotImplementedError("this kind is not supported") return self.PROTOCOLS_CHOICES[self.kind] - def _check_parent(self, parent): - for parent_ref in self.parent_refs: - if parent_ref["name"] == parent["name"]: - if parent["sectionName"] == parent_ref["sectionName"]: - return False, "this listener already exists" - for route in self.app.route_set.exclude(app=self.app, name=self.name): - for parent_ref in route.parent_refs: - if (parent_ref["name"] == parent["name"] - and parent["sectionName"] == parent_ref["sectionName"]): - return False, "this listener has already been referenced" - return True, "" + @property + def default_rules(self): + return [{"backendRefs": self.default_backend_refs}] - def _get_parent_refs(self, gateway_name, port): - gateway = get_object_or_404(self.app.gateway_set, name=gateway_name) - parent_refs = [] - for listener in gateway.listeners: - if listener["port"] == port and listener["protocol"] in self.protocols: - 
parent_ref = { - "group": "gateway.networking.k8s.io", - "kind": "Gateway", - "name": gateway_name, - "sectionName": listener["name"], - } - ok, msg = self._check_parent(parent_ref) - if not ok: - return parent_refs, msg - parent_refs.append(parent_ref) - return parent_refs, "" - - def _remove_parent_refs(self, gateway_name, port): - gateway = get_object_or_404(self.app.gateway_set, name=gateway_name) - section_names = [] - for listener in gateway.listeners: - if listener["port"] == port and listener["protocol"] in self.protocols: - section_names.append(listener["name"]) - parent_refs = [] - for parent_ref in self.parent_refs: - if (parent_ref["name"] != gateway_name or - parent_ref["sectionName"] not in section_names): - parent_refs.append(parent_ref) - if len(parent_refs) == len(self.parent_refs): - return parent_refs, "no matching listener exists" - return parent_refs, "" + @property + def default_backend_refs(self): + service = get_object_or_404(self.app.service_set, procfile_type=self.procfile_type) + backend_refs = [] + for item in service.ports: + if item["port"] == self.port: + backend_refs.append({ + "kind": "Service", + "name": str(service), + "port": item["port"], + }) + return backend_refs def check_rules(self): service = self.app.service_set.filter( @@ -201,21 +185,99 @@ def check_rules(self): for rule in self.rules: for backend_ref in rule["backendRefs"]: if backend_ref["name"] != str(service) or backend_ref["port"] not in ports: - return False, "backendRefs associated with incorrect service" + return False, {"detail": "backendRefs associated with incorrect service"} return True, "" - def get_backend_refs(self): - service = self.app.service_set.filter( - procfile_type=self.procfile_type).first() - backend_refs = [] - for item in service.ports: - if item["port"] == self.port: - backend_refs.append({ - "kind": "Service", - "name": str(service), - "port": item["port"], + def refresh_to_k8s(self): + parent_refs, http_parent_refs = 
self._get_all_parent_refs() + tls = self.app.tls_set.latest() + if tls.https_enforced and self.kind == "HTTPRoute": + self._https_enforced_to_k8s(http_parent_refs) + elif self.kind == "HTTPRoute": + parent_refs.extend(http_parent_refs) + self._scheduler.httproute.delete(self.app.id, f"{self.name}-https-redirect") + else: + parent_refs.extend(http_parent_refs) + self._refresh_to_k8s(self.rules, parent_refs) + + def attach(self, gateway_name, port): + ok, msg = self._check_parent(gateway_name, port) + if not ok: + return ok, msg + parent_ref = {"name": gateway_name, "port": port} + if parent_ref in self.parent_refs: + return False, {"detail": "gateway and port already exist in this route"} + else: + self.parent_refs.append(parent_ref) + return True, "" + + def detach(self, gateway_name, port): + parent_ref = {"name": gateway_name, "port": port} + if parent_ref in self.parent_refs: + self.parent_refs.remove(parent_ref) + else: + return False, {"detail": "gateway and port do not exist in this route"} + return True, "" + + def save(self, *args, **kwargs): + ok, msg = self.check_rules() + if not ok: + raise ValueError(msg) + super().save(*args, **kwargs) + self.refresh_to_k8s() + + def delete(self, *args, **kwargs): + try: + k8s_route = getattr(self._scheduler, self.kind.lower()) + k8s_route.delete(self.app.id, self.name, ignore_exception=False) + except KubeException: + logger.log( + msg='Kubernetes {} cannot be deleted: {}'.format(self.kind.lower(), self.name), + level=logging.ERROR, + ) + return super().delete(*args, **kwargs) + + def _check_parent(self, gateway_name, port): + try: + gateway = self.app.gateway_set.filter(name=gateway_name).latest() + except Gateway.DoesNotExist: + return False, {"detail": f"this gateway {gateway_name} does not exist"} + is_listener_allowed = False + for gateway_port in gateway.ports: + if port == gateway_port.get("port") and \ + self.kind.split("Route")[0] in gateway_port.get("protocol"): + is_listener_allowed = True + if not 
is_listener_allowed: + return False, {"detail": f"this gateway does not allow {self.kind} port {port} bind, \nplease add gateway listener first."} # noqa + for route in self.app.route_set.exclude(app=self.app, name=self.name): + for parent_ref in route.parent_refs: + if parent_ref["name"] == gateway_name and parent_ref["port"] == port: + for protocol in self.protocols: + if protocol in route.protocols: + return False, {"detail": "this listener has already been referenced"} + return True, "" + + def _refresh_to_k8s(self, rules, parent_refs): + try: + k8s_route = getattr(self._scheduler, self.kind.lower()) + hostnames = [domain.domain for domain in self.app.domain_set.all()] + try: + data = k8s_route.get(self.app.id, self.name).json() + k8s_route.patch(self.app.id, self.name, **{ + "rules": rules, + "hostnames": hostnames, + "parent_refs": parent_refs, + "version": data["metadata"]["resourceVersion"], }) - return backend_refs + except KubeException: + k8s_route.create(self.app.id, self.name, **{ + "rules": rules, + "hostnames": hostnames, + "parent_refs": parent_refs, + }) + except KubeException as e: + raise ServiceUnavailable( + f'Kubernetes {self.kind.lower()} could not be created') from e def _https_enforced_to_k8s(self, parent_refs): rules = { @@ -245,78 +307,30 @@ def _https_enforced_to_k8s(self, parent_refs): raise ServiceUnavailable( f'Kubernetes {self.kind.lower()} could not be created') from e - def _refresh_to_k8s(self, parent_refs, rules): - try: - k8s_route = getattr(self._scheduler, self.kind.lower()) - hostnames = [domain.domain for domain in self.app.domain_set] - try: - data = k8s_route.get(self.app.id, self.name).json() - k8s_route.patch(self.app.id, self.name, **{ - "rules": rules, - "hostnames": hostnames, - "parent_refs": parent_refs, - "version": data["metadata"]["resourceVersion"], - }) - except KubeException: - k8s_route.create(self.app.id, self.name, **{ - "rules": rules, - "hostnames": hostnames, - "parent_refs": parent_refs, - }) - except 
KubeException as e: - raise ServiceUnavailable( - f'Kubernetes {self.kind.lower()} could not be created') from e - - def refresh_to_k8s(self): - parent_refs = self.parent_refs - https_enforced = self.app.tls_set.latest() - if https_enforced and self.kind == "HTTPRoute": - http_parent_refs = [] - for gateway in self.app.gateway_set.filter( - name__in=set([parent_ref["name"] for parent_ref in parent_refs])): - for listener in gateway.listeners: + def _get_all_parent_refs(self): + gateways = {} + for gateway in self.app.gateway_set.filter( + name__in=[item["name"] for item in self.parent_refs]): + gateways[gateway.name] = gateway + parent_refs, http_parent_refs = [], [] + for item in self.parent_refs: + gateway_name, port = item["name"], item["port"] + if gateway_name not in gateways: + continue + gateway = gateways[gateway_name] + for listener in gateway.listeners: + if listener["port"] == port and listener["protocol"] in self.protocols: + parent_ref = { + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": gateway_name, + "sectionName": listener["name"], + } if listener["protocol"] == "HTTP" and listener["port"] == 80: - for parent_ref in self.parent_refs: - if (parent_ref["name"] == gateway.name - and parent_ref["sectionName"] == listener["name"]): - http_parent_refs.append(parent_ref) - self._https_enforced_to_k8s(http_parent_refs) - parent_refs = list(set(parent_refs).difference(http_parent_refs)) - elif self.kind == "HTTPRoute": - self._scheduler.httproute.delete(self.app.id, f"{self.name}-https-redirect") - self._refresh_to_k8s(self.rules, parent_refs) - - def attach(self, gateway_name, port): - parent_refs, msg = self._get_parent_refs(port, gateway_name) - if len(parent_refs) == 0: - return False, msg if msg else "no matching listener exists" - self.parent_refs.extend(parent_refs) - return True, "" - - def detach(self, gateway_name, port): - parent_refs, msg = self._remove_parent_refs(port, gateway_name) - if msg: - return False, msg - 
self.parent_refs = parent_refs - return True, "" - - def save(self, *args, **kwargs): - ok, msg = self.check_rules() - if not ok: - raise ValueError(msg) - super().save(*args, **kwargs) - self.refresh_to_k8s() - - def delete(self, *args, **kwargs): - try: - k8s_route = getattr(self._scheduler, self.kind.lower()) - k8s_route.delete(self.app.id, self.name, ignore_exception=False) - except KubeException: - logger.log( - msg='Kubernetes {} cannot be deleted: {}'.format(self.kind.lower(), self.name), - level=logging.ERROR, - ) - return super().delete(*args, **kwargs) + http_parent_refs.append(parent_ref) + else: + parent_refs.append(parent_ref) + return parent_refs, http_parent_refs class Meta: get_latest_by = 'created' diff --git a/rootfs/api/models/service.py b/rootfs/api/models/service.py index 5558b71d7..2131999f0 100644 --- a/rootfs/api/models/service.py +++ b/rootfs/api/models/service.py @@ -14,7 +14,7 @@ class Service(AuditedModel): owner = models.ForeignKey(User, on_delete=models.PROTECT) app = models.ForeignKey('App', on_delete=models.CASCADE) - ports = models.JSONField(default=dict) + ports = models.JSONField(default=list) procfile_type = models.TextField() class Meta: @@ -25,25 +25,28 @@ class Meta: def __str__(self): return self._svc_name() + @property + def domain(self): + return "{}.{}.svc.{}".format( + self._svc_name(), self._namespace(), settings.KUBERNETES_CLUSTER_DOMAIN + ) + def as_dict(self): - namespace = self._namespace() - svc_name = self._svc_name() - cluster_domain = settings.KUBERNETES_CLUSTER_DOMAIN return { - "domain": f"{svc_name}.{namespace}.svc.{cluster_domain}", + "domain": self.domain, "ports": self.ports, "procfile_type": self.procfile_type, } def port_name(self, port, protocol): - return "%s-%s-%s-%s" % (self.app.id, self.procfile_type, protocol, port) + return "-".join([self.app.id, self.procfile_type, protocol, str(port)]).lower() def add_port(self, port, protocol, target_port): self.ports.append({ "name": self.port_name(port, 
protocol), "port": port, "protocol": protocol, - "target_port": target_port, + "targetPort": target_port, }) def refresh_k8s_svc(self): @@ -56,10 +59,12 @@ def refresh_k8s_svc(self): self._scheduler.svc.patch(namespace, svc_name, **{ "ports": self.ports, "version": data["metadata"]["resourceVersion"], + "procfile_type": self.procfile_type, }) except KubeException: self._scheduler.svc.create(namespace, svc_name, **{ "ports": self.ports, + "procfile_type": self.procfile_type, }) except KubeException as e: raise ServiceUnavailable('Kubernetes service could not be created') from e diff --git a/rootfs/api/models/tls.py b/rootfs/api/models/tls.py index af5cd1f92..d091b722a 100644 --- a/rootfs/api/models/tls.py +++ b/rootfs/api/models/tls.py @@ -13,17 +13,19 @@ logger = logging.getLogger(__name__) -class TLS(UuidAuditedModel): - DEFAULT_ISSUER = { +def default_issuer(): + return { "email": "anonymous@cert-manager.io", "server": "https://acme-v02.api.letsencrypt.org/directory", "key_id": "", "key_secret": "", } + +class TLS(UuidAuditedModel): owner = models.ForeignKey(User, on_delete=models.PROTECT) app = models.ForeignKey('App', on_delete=models.CASCADE) - issuer = models.JSONField(default=dict) + issuer = models.JSONField(default=default_issuer) https_enforced = models.BooleanField(null=True) certs_auto_enabled = models.BooleanField(null=True) @@ -61,12 +63,12 @@ def _refresh_secret_to_k8s(self): try: try: data = self._scheduler.secret.get(self.app.id, secret_name).json() - self._scheduler.secret.patch(self.app.id, secret_name, **{ + self._scheduler.secret.patch(self.app.id, secret_name, { "secret": self.issuer["key_secret"], "version": data["metadata"]["resourceVersion"], }) except KubeException: - self._scheduler.secret.create(self.app.id, secret_name, **{ + self._scheduler.secret.create(self.app.id, secret_name, { "secret": self.issuer["key_secret"], }) except KubeException as e: @@ -75,6 +77,8 @@ def _refresh_secret_to_k8s(self): def refresh_issuer_to_k8s(self): name 
= namespace = self.app.id try: + if self.issuer["key_id"] and self.issuer["key_secret"]: + self._refresh_secret_to_k8s() data = copy.copy(self.issuer) data["parent_refs"] = [ { @@ -82,23 +86,22 @@ def refresh_issuer_to_k8s(self): "kind": "Gateway", "name": gateway.name, } - for gateway in self.app.gateway_set + for gateway in self.app.gateway_set.all() ] - if self.issuer["key_id"] and self.issuer["key_secret"]: - self._refresh_secret_to_k8s() try: - data = self._scheduler.issuer.get(namespace, name, **data).json() - data.update({"version": data["metadata"]["resourceVersion"]}) - self._scheduler.issuer.patch(namespace, name, **data) + version = self._scheduler.issuer.get( + namespace, name, ignore_exception=False).json()["metadata"]["resourceVersion"] + data.update({"version": version}) + self._scheduler.issuer.put(namespace, name, **data) except KubeException: self._scheduler.issuer.create(namespace, name, **data) except KubeException as e: - raise ServiceUnavailable('Kubernetes secret could not be created') from e + raise ServiceUnavailable('Kubernetes issuer could not be created') from e def refresh_certificate_to_k8s(self): namespace = name = self.app.id if self.certs_auto_enabled: - hosts = [domain.domain for domain in self.app.domain_set] + hosts = [domain.domain for domain in self.app.domain_set.all()] try: data = self._scheduler.certificate.get(namespace, name).json() version = data["metadata"]["resourceVersion"] @@ -113,5 +116,4 @@ def refresh_certificate_to_k8s(self): @transaction.atomic def save(self, *args, **kwargs): self._check_previous_tls_settings() - self.app.release_set.filter(failed=False).latest().config super(TLS, self).save(*args, **kwargs) diff --git a/rootfs/api/serializers.py b/rootfs/api/serializers.py index 2a8fc5d0d..5ed14810e 100644 --- a/rootfs/api/serializers.py +++ b/rootfs/api/serializers.py @@ -21,8 +21,12 @@ User = get_user_model() logger = logging.getLogger(__name__) -PROTOCOL_MATCH = re.compile(r'^(TCP|UDP|SCTP)$') 
-PROTOCOL_MISMATCH_MSG = "Currently, the protocol only supports TCP, UDP, and SCTP" +SERVICE_PROTOCOL_MATCH = re.compile(r'^(TCP|UDP|SCTP)$') +SERVICE_PROTOCOL_MISMATCH_MSG = "the service protocol only supports TCP, UDP, and SCTP" +GATEWAY_PROTOCOL_MATCH = re.compile(r'^(HTTP|HTTPS|TCP|TLS|UDP)$') +GATEWAY_PROTOCOL_MISMATCH_MSG = "the gateway protocol only supports HTTP, HTTPS, TCP, TLS and UDP" +ROUTE_PROTOCOL_MATCH = re.compile(r'^(HTTPRoute|TCPRoute|UDPRoute|TLSRoute)$') +ROUTE_PROTOCOL_MISMATCH_MSG = "the route kind only supports HTTPRoute, TCPRoute, UDPRoute, and TLSRoute" # noqa PROCTYPE_MATCH = re.compile(r'^(?P[a-z0-9]+(\-[a-z0-9]+)*)$') PROCTYPE_MISMATCH_MSG = "Process types can only contain lowercase alphanumeric characters" MEMLIMIT_MATCH = re.compile(r'^(?P([1-9][0-9]*[mgMG]))$', re.IGNORECASE) @@ -529,8 +533,8 @@ def validate_port(value): @staticmethod def validate_protocol(value): - if not re.match(PROTOCOL_MATCH, value): - raise serializers.ValidationError(PROTOCOL_MISMATCH_MSG) + if not re.match(SERVICE_PROTOCOL_MATCH, value): + raise serializers.ValidationError(SERVICE_PROTOCOL_MISMATCH_MSG) return value @classmethod @@ -732,10 +736,55 @@ class GatewaySerializer(serializers.Serializer): name = serializers.CharField(max_length=63, required=True) listeners = serializers.JSONField(required=False) + @staticmethod + def validate_port(value): + if not str(value).isnumeric(): + raise serializers.ValidationError('port can only be a numeric value') + elif int(value) not in range(1, 65536): + raise serializers.ValidationError('port needs to be between 1 and 65535') + return value + + @staticmethod + def validate_protocol(value): + if not re.match(GATEWAY_PROTOCOL_MATCH, value): + raise serializers.ValidationError(GATEWAY_PROTOCOL_MISMATCH_MSG) + return value + + @staticmethod + def validate_procfile_type(value): + if not re.match(PROCTYPE_MATCH, value): + raise serializers.ValidationError(PROCTYPE_MISMATCH_MSG) + + return value + class 
RouteSerializer(serializers.Serializer): app = serializers.SlugRelatedField(slug_field='id', queryset=models.app.App.objects.all()) owner = serializers.ReadOnlyField(source='owner.username') kind = serializers.CharField(max_length=15, required=False) name = serializers.CharField(max_length=63, required=True) + port = serializers.IntegerField() + procfile_type = serializers.CharField(max_length=63, required=True) rules = serializers.JSONField(required=False) + parent_refs = serializers.JSONField(required=False) + + @staticmethod + def validate_port(value): + if not str(value).isnumeric(): + raise serializers.ValidationError('port can only be a numeric value') + elif int(value) not in range(1, 65536): + raise serializers.ValidationError('port needs to be between 1 and 65535') + return value + + @staticmethod + def validate_kind(value): + if not re.match(ROUTE_PROTOCOL_MATCH, value): + raise serializers.ValidationError(ROUTE_PROTOCOL_MISMATCH_MSG) + return value + + @staticmethod + def validate_procfile_type(value): + if not re.match(PROCTYPE_MATCH, value): + raise serializers.ValidationError(PROCTYPE_MISMATCH_MSG) + + return value diff --git a/rootfs/api/settings/production.py b/rootfs/api/settings/production.py index 9b1a3a14a..c85edfc7e 100644 --- a/rootfs/api/settings/production.py +++ b/rootfs/api/settings/production.py @@ -274,8 +274,8 @@ SECRET_KEY = os.environ.get('DRYCC_SECRET_KEY', random_secret) BUILDER_KEY = os.environ.get('DRYCC_BUILDER_KEY', random_secret) -# ingress class name -INGRESS_CLASS = os.environ.get('DRYCC_INGRESS_CLASS', '') +# gateway class name +GATEWAY_CLASS = os.environ.get('DRYCC_GATEWAY_CLASS', '') PLATFORM_DOMAIN = os.environ.get('DRYCC_PLATFORM_DOMAIN', 'local.drycc.cc') @@ -402,10 +402,6 @@ LOGGER_HOST = os.environ.get('DRYCC_LOGGER_SERVICE_HOST', '127.0.0.1') LOGGER_PORT = os.environ.get('DRYCC_LOGGER_SERVICE_PORT_HTTP', 80) -# router information -ROUTER_HOST = os.environ.get('DRYCC_ROUTER_SERVICE_HOST', '127.0.0.1') -ROUTER_PORT = 
os.environ.get('DRYCC_ROUTER_SERVICE_PORT', 80) - DRYCC_DATABASE_URL = os.environ.get('DRYCC_DATABASE_URL', 'postgres://postgres:@:5432/drycc') DATABASES = { 'default': dj_database_url.config(default=DRYCC_DATABASE_URL) diff --git a/rootfs/api/settings/testing.py b/rootfs/api/settings/testing.py index 1e82d3b72..6b2b37d04 100644 --- a/rootfs/api/settings/testing.py +++ b/rootfs/api/settings/testing.py @@ -25,10 +25,6 @@ SCHEDULER_MODULE = 'scheduler.mock' SCHEDULER_URL = 'http://test-scheduler.example.com' -# router information -ROUTER_HOST = 'drycc-router.example.com' -ROUTER_PORT = 80 - # randomize test database name so we can run multiple unit tests simultaneously DATABASES['default']['NAME'] = "unittest-{}".format(''.join( random.choice(string.ascii_letters + string.digits) for _ in range(8))) diff --git a/rootfs/api/signals.py b/rootfs/api/signals.py index e9584b3c9..633b7bb6e 100644 --- a/rootfs/api/signals.py +++ b/rootfs/api/signals.py @@ -169,9 +169,9 @@ def tls_changed_handle(sender, instance: TLS, created=False, update_fields=None, def gateway_changed_handle( sender, instance: Gateway, created=False, update_fields=None, **kwargs): if created or (not created and update_fields is None): # create or delete - for tls in instance.app.tls_set: + for tls in instance.app.tls_set.all(): tls.refresh_certificate_to_k8s() - for route in instance.app.route_set: + for route in instance.app.route_set.all(): route.refresh_to_k8s() @@ -185,9 +185,9 @@ def service_changed_handle( @receiver(signal=[post_save, post_delete], sender=Domain) def domain_changed_handle( sender, instance: Domain, created=False, update_fields=None, **kwargs): - for gateway in instance.app.gateway_set: + for gateway in instance.app.gateway_set.all(): gateway.refresh_to_k8s() - for route in instance.app.route_set: + for route in instance.app.route_set.all(): route.refresh_to_k8s() @@ -195,7 +195,7 @@ def domain_changed_handle( def appsettings_changed_handle( sender, instance: AppSettings, 
created=False, update_fields=None, **kwargs): if not created and (update_fields is not None and "routable" in update_fields): - for route in instance.app.route_set: + for route in instance.app.route_set.all(): route.routable = instance.routable route.save() diff --git a/rootfs/api/tests/__init__.py b/rootfs/api/tests/__init__.py index 59684b68f..63df485fc 100644 --- a/rootfs/api/tests/__init__.py +++ b/rootfs/api/tests/__init__.py @@ -4,7 +4,6 @@ import time from os.path import dirname, realpath -from django.conf import settings from django.test.runner import DiscoverRunner from rest_framework.test import APITestCase, APITransactionTestCase @@ -32,11 +31,10 @@ def fake_responses(request, context): return response['text'] -url = 'http://{}:{}'.format(settings.ROUTER_HOST, settings.ROUTER_PORT) adapter = requests_mock.Adapter() -adapter.register_uri('GET', url + '/', text=fake_responses) -adapter.register_uri('GET', url + '/health', text=fake_responses) -adapter.register_uri('GET', url + '/healthz', text=fake_responses) +adapter.register_uri('GET', '/', text=fake_responses) +adapter.register_uri('GET', '/health', text=fake_responses) +adapter.register_uri('GET', '/healthz', text=fake_responses) # Root of the test directory (for files and such) TEST_ROOT = dirname(realpath(__file__)) diff --git a/rootfs/api/tests/test_app.py b/rootfs/api/tests/test_app.py index 9e11a96f4..7015e16d5 100644 --- a/rootfs/api/tests/test_app.py +++ b/rootfs/api/tests/test_app.py @@ -443,11 +443,12 @@ def test_app_verify_application_health_success(self, mock_requests): {'text': 'Not Found', 'status_code': 404}, {'text': 'OK', 'status_code': 200} ] - hostname = 'http://{}:{}/'.format(settings.ROUTER_HOST, settings.ROUTER_PORT) - mr = mock_requests.register_uri('GET', hostname, responses) # create app app_id = self.create_app() + hostname = 'http://{}.{}.svc.{}:80/'.format( + app_id, app_id, settings.KUBERNETES_CLUSTER_DOMAIN) + mr = mock_requests.register_uri('GET', hostname, responses) # 
deploy app to get verification url = "/v2/apps/{}/builds".format(app_id) @@ -477,12 +478,13 @@ def test_app_verify_application_health_failure_404(self, mock_requests): {'text': 'Not Found', 'status_code': 404}, {'text': 'Not Found', 'status_code': 404}, ] - hostname = 'http://{}:{}/'.format(settings.ROUTER_HOST, settings.ROUTER_PORT) - mr = mock_requests.register_uri('GET', hostname, responses) # create app app_id = self.create_app() + hostname = 'http://{}.{}.svc.{}:80/'.format( + app_id, app_id, settings.KUBERNETES_CLUSTER_DOMAIN) + mr = mock_requests.register_uri('GET', hostname, responses) # deploy app to get verification url = "/v2/apps/{}/builds".format(app_id) body = {'image': 'autotest/example', 'stack': 'container'} @@ -501,12 +503,12 @@ def test_app_verify_application_health_failure_exceptions(self, mock_requests): def _raise_exception(request, ctx): raise requests.exceptions.RequestException('Boom!') - # function tries to hit router 10 times - hostname = 'http://{}:{}/'.format(settings.ROUTER_HOST, settings.ROUTER_PORT) - mr = mock_requests.register_uri('GET', hostname, text=_raise_exception) - # create app app_id = self.create_app() + # function tries to hit router 10 times + hostname = 'http://{}.{}.svc.{}:80/'.format( + app_id, app_id, settings.KUBERNETES_CLUSTER_DOMAIN) + mr = mock_requests.register_uri('GET', hostname, text=_raise_exception) # deploy app to get verification url = "/v2/apps/{}/builds".format(app_id) diff --git a/rootfs/api/tests/test_domain.py b/rootfs/api/tests/test_domain.py index 3795f0c7c..f08830592 100644 --- a/rootfs/api/tests/test_domain.py +++ b/rootfs/api/tests/test_domain.py @@ -7,7 +7,6 @@ from django.contrib.auth import get_user_model from django.core.cache import cache -from django.conf import settings from rest_framework.authtoken.models import Token from api.models.domain import Domain @@ -69,9 +68,7 @@ def test_strip_dot(self): url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = 
self.client.get(url) expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), domain]), - expected, msg) + self.assertEqual([domain], expected, msg) def test_manage_idn_domain(self): url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) @@ -107,9 +104,7 @@ def test_manage_idn_domain(self): url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = self.client.get(url) expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]), - sorted(expected), msg) + self.assertEqual([ace_domain], expected, msg) # Verify creation failure for same domain with different encoding if ace_domain != domain: @@ -130,13 +125,11 @@ def test_manage_idn_domain(self): # Verify removal url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = self.client.get(url) - self.assertEqual(1, response.data['count'], msg) + self.assertEqual(0, response.data['count'], msg) # verify only app domain is left expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - ["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)], - expected, msg) + self.assertEqual([], expected, msg) # Use different encoding for creating and deleting (ACE) if ace_domain != domain: @@ -148,9 +141,7 @@ def test_manage_idn_domain(self): url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = self.client.get(url) expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]), - sorted(expected), msg) + self.assertEqual([ace_domain], expected, msg) # Delete url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=ace_domain, @@ -161,13 +152,11 @@ def test_manage_idn_domain(self): # Verify removal url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = 
self.client.get(url) - self.assertEqual(1, response.data['count'], msg) + self.assertEqual(0, response.data['count'], msg) # verify only app domain is left expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - ["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)], - expected, msg) + self.assertEqual([], expected, msg) # Use different encoding for creating and deleting (Unicode) if unicode_domain != domain: @@ -179,9 +168,7 @@ def test_manage_idn_domain(self): url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = self.client.get(url) expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]), - sorted(expected), msg) + self.assertEqual([ace_domain], expected, msg) # Delete url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=unicode_domain, @@ -192,13 +179,11 @@ def test_manage_idn_domain(self): # Verify removal url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = self.client.get(url) - self.assertEqual(1, response.data['count'], msg) + self.assertEqual(0, response.data['count'], msg) # verify only app domain is left expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - ["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)], - expected, msg) + self.assertEqual([], expected, msg) def test_manage_domain(self): url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) @@ -228,9 +213,7 @@ def test_manage_domain(self): url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = self.client.get(url) expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), domain]), - sorted(expected), msg) + self.assertEqual([domain], expected, msg) # Delete url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=domain, @@ -241,13 +224,11 @@ def test_manage_domain(self): # 
Verify removal url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id) response = self.client.get(url) - self.assertEqual(1, response.data['count'], msg) + self.assertEqual(0, response.data['count'], msg) # verify only app domain is left expected = [data['domain'] for data in response.data['results']] - self.assertEqual( - ["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)], - expected, msg) + self.assertEqual([], expected, msg) def test_delete_domain_does_not_exist(self): """Remove a domain that does not exist""" @@ -274,17 +255,10 @@ def test_delete_domain_does_not_remove_latest(self): with self.assertRaises(Domain.DoesNotExist): Domain.objects.get(domain=test_domains[0]) - def test_delete_domain_does_not_remove_default(self): - domain = "%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN) - url = '/v2/apps/{app_id}/domains/{domain}'.format(domain=domain, - app_id=self.app_id) - response = self.client.delete(url) - self.assertEqual(response.status_code, 403, response.data) - def test_delete_domain_does_not_remove_others(self): """https://github.com/drycc/drycc/issues/3475""" self.test_delete_domain_does_not_remove_latest() - self.assertEqual(Domain.objects.all().count(), 2) + self.assertEqual(Domain.objects.all().count(), 1) def test_manage_domain_invalid_app(self): # Create domain diff --git a/rootfs/api/tests/test_gateway.py b/rootfs/api/tests/test_gateway.py new file mode 100644 index 000000000..faea5a40d --- /dev/null +++ b/rootfs/api/tests/test_gateway.py @@ -0,0 +1,379 @@ +import json +import string +import random +from django.contrib.auth import get_user_model +from django.core.cache import cache +from rest_framework.authtoken.models import Token + +from api.tests import TEST_ROOT, DryccTransactionTestCase + +User = get_user_model() + + +class BaseGatewayTest(DryccTransactionTestCase): + fixtures = ['tests.json'] + + def setUp(self): + self.user = User.objects.get(username='autotest') + self.token = Token.objects.get(user=self.user).key + 
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token) + + def tearDown(self): + # make sure every test has a clean slate for k8s mocking + cache.clear() + + def create_gateway(self, app_id, name, port, protocol): + response = self.client.post( + '/v2/apps/{}/gateways/'.format(app_id), + {'name': name, 'port': port, 'protocol': protocol} + ) + self.assertEqual(response.status_code, 201) + + def create_tls_domain(self, app_id): + cert_url = '/v2/certs' + secret_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(23)) + domain = 'autotest.example.com' + + with open('{}/certs/{}.key'.format(TEST_ROOT, domain)) as f: + key = f.read() + + with open('{}/certs/{}.cert'.format(TEST_ROOT, domain)) as f: + cert = f.read() + response = self.client.post( + cert_url, + { + 'name': secret_name, + 'certificate': cert, + 'key': key + } + ) + self.assertEqual(response.status_code, 201) + + response = self.client.post( + '/v2/apps/{}/domains'.format(app_id), + {'domain': domain} + ) + self.assertEqual(response.status_code, 201) + + response = self.client.post( + '{}/{}/domain/'.format(cert_url, secret_name), + {'domain': domain} + ) + self.assertEqual(response.status_code, 201) + return domain, secret_name + + +class GatewayTest(BaseGatewayTest): + + """Tests push notification from build system""" + + def test_create_gateway(self): + app_id = self.create_app() + self.create_gateway(app_id, 'bing-gateway', 8000, "HTTP") + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + self.assertEqual(response.data["count"], 1, response.data) + + def test_add_listener(self): + app_id = self.create_app() + self.create_gateway(app_id, 'bing-gateway', 8000, "HTTP") + response = self.client.post( + '/v2/apps/{}/gateways/'.format(app_id), + {'name': 'bing-gateway', 'port': 443, 'protocol': "HTTP"} + ) + self.assertEqual(response.status_code, 201) + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + results = [{ + "app": app_id, + "owner": 
"autotest", + "name": "bing-gateway", + "listeners": [ + { + "name": "%s-8000-tcp" % app_id, + "port": 8000, + "protocol": "HTTP", + "allowedRoutes": {"namespaces": {"from": "All"}} + }, + { + "name": "%s-443-tcp" % app_id, + "port": 443, + "protocol": "HTTP", + "allowedRoutes": {"namespaces": {"from": "All"}} + }] + }] + self.assertEqual(results, json.loads(json.dumps(response.data["results"]))) + + def add_tls_listener(self, name, protocol): + app_id = self.create_app() + domain, secret_name = self.create_tls_domain(app_id) + response = self.client.post( + '/v2/apps/{}/gateways/'.format(app_id), + {'name': name, 'port': 443, 'protocol': protocol} + ) + self.assertEqual(response.status_code, 201) + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + if protocol == "HTTPS": + listener_name = "%s-443-mix-%s" % (app_id, domain) + else: + listener_name = "%s-443-tcp-%s" % (app_id, domain) + results = [{ + "app": app_id, + "owner": "autotest", + "name": name, + "listeners": [{ + "tls": { + "certificateRefs": [{ + "kind": "Secret", + "name": secret_name + }] + }, + "name": listener_name, + "port": 443, + "hostname": domain, + "protocol": protocol, + "allowedRoutes": { + "namespaces": { + "from": "All" + } + } + }] + }] + self.assertEqual(results, json.loads(json.dumps(response.data["results"]))) + return app_id, domain, secret_name + + def test_add_tls_listener(self): + self.add_tls_listener("tls-gateway", "TLS") + + def test_add_https_listener(self): + self.add_tls_listener("bingo-gateway", "HTTPS") + + def test_remove_domain(self): + app_id, domain, _ = self.add_tls_listener("bingo-gateway", "HTTPS") + url = '/v2/apps/{app_id}/domains/{domain}'.format(domain=domain, + app_id=app_id) + response = self.client.delete(url) + self.assertEqual(response.status_code, 204) + + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + self.assertEqual(response.data["results"][0]["listeners"], []) + + def test_remove_tls(self): + app_id, domain, 
secret_name = self.add_tls_listener("bingo-gateway", "HTTPS") + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + self.assertEqual(len(response.data["results"][0]["listeners"]), 1) + response = self.client.delete( + '{}/{}/domain/{}/'.format('/v2/certs', secret_name, domain) + ) + self.assertEqual(response.status_code, 204) + + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + self.assertEqual(response.data["results"][0]["listeners"], []) + return app_id + + def test_certs_auto_enabled(self): + app_id = self.test_remove_tls() + data = {'certs_auto_enabled': True} + response = self.client.post( + '/v2/apps/{}/tls'.format(app_id), + data) + self.assertEqual(response.status_code, 201, response.data) + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + self.assertEqual(len(response.data["results"][0]["listeners"]), 1) + + def test_remove_listener(self): + app_id = self.create_app() + self.create_gateway(app_id, 'bing-gateway', 8000, "HTTP") + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + self.assertEqual(response.data["count"], 1, response.data) + # delete + response = self.client.delete( + '/v2/apps/{}/gateways/'.format(app_id), + {'name': 'bing-gateway', 'port': 8000, 'protocol': "HTTP"} + ) + self.assertEqual(response.status_code, 204) + response = self.client.get('/v2/apps/{}/gateways/'.format(app_id)) + self.assertEqual(response.data["count"], 0, response.data) + + +class RouteTest(BaseGatewayTest): + + def create_route(self, app_id): + # create service + port = 5000 + procfile_type = "task" + response = self.client.post( + '/v2/apps/{}/services'.format(app_id), + { + 'port': port, + 'protocol': 'TCP', + 'target_port': port, + 'procfile_type': procfile_type + } + ) + self.assertEqual(response.status_code, 201, response.data) + # create route + route_name = "test-route" + response = self.client.post( + '/v2/apps/{}/routes/'.format(app_id), + { + "port": 5000, + "procfile_type": 
procfile_type, + "kind": "HTTPRoute", + "name": route_name, + } + ) + self.assertEqual(response.status_code, 201) + return procfile_type, port, route_name + + def test_create_route(self): + app_id = self.create_app() + self.create_route(app_id) + # create route error + response = self.client.post( + '/v2/apps/{}/routes/'.format(app_id), + { + "port": 5000, + "procfile_type": "no-exists", + "kind": "HTTPRoute", + "name": "test-route-1", + } + ) + self.assertEqual(response.status_code, 404) + + def test_route_attach(self): + app_id = self.create_app() + _, port, route_name = self.create_route(app_id) + gateway_name_1 = 'bing-gateway-1' + self.create_gateway(app_id, gateway_name_1, 5000, "HTTP") + self.client.patch( + '/v2/apps/{}/routes/{}/attach/'.format(app_id, route_name), + { + "gateway": gateway_name_1, + "port": port + } + ) + response = self.client.get('/v2/apps/{}/routes/'.format(app_id)) + self.assertEqual(len(response.data["results"][0]["parent_refs"]), 1) + gateway_name_2 = 'bing-gateway-2' + self.create_gateway(app_id, gateway_name_2, 5000, "HTTP") + self.client.patch( + '/v2/apps/{}/routes/{}/attach/'.format(app_id, route_name), + { + "gateway": gateway_name_2, + "port": port + } + ) + response = self.client.get('/v2/apps/{}/routes/'.format(app_id)) + self.assertEqual(len(response.data["results"][0]["parent_refs"]), 2) + return app_id, gateway_name_1, gateway_name_2, port, route_name + + def test_route_detach(self): + app_id, gateway_name_1, gateway_name_2, port, route_name = self.test_route_attach() + self.client.patch( + '/v2/apps/{}/routes/{}/detach/'.format(app_id, route_name), + { + "gateway": gateway_name_1, + "port": port + } + ) + response = self.client.get('/v2/apps/{}/routes/'.format(app_id)) + self.assertEqual(len(response.data["results"][0]["parent_refs"]), 1) + + self.client.patch( + '/v2/apps/{}/routes/{}/detach/'.format(app_id, route_name), + { + "gateway": gateway_name_2, + "port": port + } + ) + response = 
self.client.get('/v2/apps/{}/routes/'.format(app_id)) + self.assertEqual(len(response.data["results"][0]["parent_refs"]), 0) + + response = self.client.patch( + '/v2/apps/{}/routes/{}/detach/'.format(app_id, route_name), + { + "gateway": gateway_name_2, + "port": port + } + ) + self.assertEqual(response.status_code, 400) + + def test_route_delete(self): + app_id = self.create_app() + _, _, route_name = self.create_route(app_id) + response = self.client.get('/v2/apps/{}/routes/'.format(app_id)) + self.assertEqual(len(response.data["results"]), 1) + response = self.client.delete( + '/v2/apps/{}/routes/{}/'.format(app_id, route_name), + ) + self.assertEqual(response.status_code, 204) + response = self.client.get('/v2/apps/{}/routes/'.format(app_id)) + self.assertEqual(len(response.data["results"]), 0) + + def test_rule_get(self): + app_id = self.create_app() + procfile_type, _, route_name = self.create_route(app_id) + response = self.client.get( + '/v2/apps/{}/routes/{}/rules/'.format(app_id, route_name), + ) + expect = [{ + 'backendRefs': [{ + 'kind': 'Service', + 'name': "%s-%s" % (app_id, procfile_type), + 'port': 5000 + }] + }] + self.assertEqual(response.data, expect) + + def test_rule_set(self): + app_id = self.create_app() + procfile_type, _, route_name = self.create_route(app_id) + expect = [{ + "matches": [ + { + "path": { + "type": "PathPrefix", + "value": "/get" + } + } + ], + 'backendRefs': [ + { + 'kind': 'Service', + 'name': "%s-%s" % (app_id, procfile_type), + 'port': 5000 + } + ] + }] + response = self.client.put( + '/v2/apps/{}/routes/{}/rules/'.format(app_id, route_name), + json.dumps(expect), + content_type="application/json", + ) + response = self.client.get( + '/v2/apps/{}/routes/{}/rules/'.format(app_id, route_name), + ) + self.assertEqual(json.dumps(response.data), json.dumps(expect)) + + expect = [{ + 'backendRefs': [ + { + 'kind': 'Service', + 'name': "%s-%s-noexists" % (app_id, procfile_type), + 'port': 5000 + } + ], + "matches": [ + { + 
"path": { + "type": "PathPrefix", + "value": "/get" + } + } + ] + }] + response = self.client.put( + '/v2/apps/{}/routes/{}/rules/'.format(app_id, route_name), + json.dumps(expect), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400) diff --git a/rootfs/api/tests/test_services.py b/rootfs/api/tests/test_services.py index 835ce0ace..7c7fa1c25 100644 --- a/rootfs/api/tests/test_services.py +++ b/rootfs/api/tests/test_services.py @@ -1,5 +1,6 @@ from django.contrib.auth import get_user_model from django.core.cache import cache +from django.conf import settings from rest_framework.authtoken.models import Token from api.tests import DryccTransactionTestCase @@ -25,7 +26,6 @@ def tearDown(self): def test_service_basic_ops(self): """Test basic service operations.""" app_id = self.create_app() - # list non-existing services response = self.client.get('/v2/apps/{}/services'.format(app_id)) self.assertEqual(response.status_code, 200, response.data) @@ -36,6 +36,7 @@ def test_service_basic_ops(self): { 'port': 5000, 'protocol': 'UDP', + 'target_port': 5000, 'procfile_type': 'test' } ) @@ -44,38 +45,71 @@ def test_service_basic_ops(self): response = self.client.get('/v2/apps/{}/services'.format(app_id)) self.assertEqual(response.status_code, 200, response.data) self.assertEqual(len(response.data['services']), 1) - expected1 = { - 'port': 5000, - 'protocol': 'UDP', - 'procfile_type': 'test' + expected0 = { + "domain": "%s-%s.%s.svc.%s" % ( + app_id, "test", app_id, settings.KUBERNETES_CLUSTER_DOMAIN), + "ports": [{ + 'name': "%s-%s-%s-%s" % (app_id, "test", 'udp', 5000), + 'port': 5000, + 'protocol': 'UDP', + 'targetPort': 5000, + }], + "procfile_type": "test" } - self.assertDictContainsSubset(expected1, response.data['services'][0]) - # update 1st service + self.assertDictContainsSubset(expected0, response.data['services'][0]) + # port is occupied response = self.client.post( '/v2/apps/{}/services'.format(app_id), { 'port': 5000, 'protocol': 
'UDP', + 'target_port': 5000, + 'procfile_type': 'test' + } + ) + self.assertEqual(response.status_code, 400, response.data) + + # add new port + expected1 = { + "domain": "%s-%s.%s.svc.%s" % ( + app_id, "test", app_id, settings.KUBERNETES_CLUSTER_DOMAIN), + "ports": [ + { + 'name': "%s-%s-%s-%s" % (app_id, "test", 'udp', 5000), + 'port': 5000, + 'protocol': 'UDP', + 'targetPort': 5000, + }, + { + 'name': "%s-%s-%s-%s" % (app_id, "test", 'tcp', 6000), + 'port': 6000, + 'protocol': 'TCP', + 'targetPort': 6000, + } + ], + "procfile_type": "test" + } + response = self.client.post( + '/v2/apps/{}/services'.format(app_id), + { + 'port': 6000, + 'protocol': 'TCP', + 'target_port': 6000, 'procfile_type': 'test' } ) self.assertEqual(response.status_code, 204, response.data) - # list 1st service and get new value response = self.client.get('/v2/apps/{}/services'.format(app_id)) self.assertEqual(response.status_code, 200, response.data) - self.assertEqual(len(response.data['services']), 1) - expected1 = { - 'port': 5000, - 'protocol': 'UDP', - 'procfile_type': 'test' - } self.assertDictContainsSubset(expected1, response.data['services'][0]) + # create 2nd service response = self.client.post( '/v2/apps/{}/services'.format(app_id), { 'port': 5000, 'protocol': 'UDP', + 'target_port': 5000, 'procfile_type': 'test2' } ) @@ -85,27 +119,42 @@ def test_service_basic_ops(self): self.assertEqual(response.status_code, 200, response.data) self.assertEqual(len(response.data['services']), 2) expected2 = { - 'port': 5000, - 'protocol': 'UDP', - 'procfile_type': 'test2' + "domain": "%s-%s.%s.svc.%s" % ( + app_id, "test2", app_id, settings.KUBERNETES_CLUSTER_DOMAIN), + "ports": [{ + 'name': "%s-%s-%s-%s" % (app_id, "test2", 'udp', 5000), + 'port': 5000, + 'protocol': 'UDP', + 'targetPort': 5000, + }], + "procfile_type": "test2" } self.assertDictContainsSubset(expected2, response.data['services'][0]) self.assertDictContainsSubset(expected1, response.data['services'][1]) + # delete port + 
response = self.client.delete( + '/v2/apps/{}/services'.format(app_id), + {'procfile_type': 'test', "protocol": "TCP", "port": 6000} + ) + response = self.client.get('/v2/apps/{}/services'.format(app_id)) + self.assertDictContainsSubset(expected0, response.data['services'][1]) # delete 1st response = self.client.delete( '/v2/apps/{}/services'.format(app_id), - {'procfile_type': 'test'} + {'procfile_type': 'test', "protocol": "UDP", "port": 5000} ) self.assertEqual(response.status_code, 204, response.data) # delete 2nd response = self.client.delete( '/v2/apps/{}/services'.format(app_id), - {'procfile_type': 'test2'} + {'procfile_type': 'test2', "protocol": "UDP", "port": 5000} ) self.assertEqual(response.status_code, 204, response.data) + response = self.client.get('/v2/apps/{}/services'.format(app_id)) + self.assertEqual(response.data["services"], []) # delete non-existing (1st again) response = self.client.delete( '/v2/apps/{}/services'.format(app_id), - {'procfile_type': 'test'} + {'procfile_type': 'test', "protocol": "UDP", "port": 5000} ) self.assertEqual(response.status_code, 404, response.data) diff --git a/rootfs/api/urls.py b/rootfs/api/urls.py index 46cdd39fb..5ec3e4f53 100644 --- a/rootfs/api/urls.py +++ b/rootfs/api/urls.py @@ -158,7 +158,7 @@ views.AdminPermsViewSet.as_view({'get': 'list', 'post': 'create'})), # certificates re_path( - r'^certs/(?P[-_*.\w]+)/domain/(?P\**\.?[-\._\w]+)?$', + r'^certs/(?P[-_*.\w]+)/domain/(?P\**\.?[-\._\w]+)?/?$', views.CertificateViewSet.as_view({'delete': 'detach', 'post': 'attach'})), re_path( r'^certs/(?P[-_*.\w]+)/?$', @@ -176,8 +176,8 @@ {'post': 'create_or_update', 'get': 'list', 'delete': 'delete'})), # routes re_path( - r"^apps/(?P{})/routes/?$".format(settings.APP_URL_REGEX), - views.GatewayViewSet.as_view( + r"^apps/(?P{})/routes/(?P[-_\w]+)?/?$".format(settings.APP_URL_REGEX), + views.RouteViewSet.as_view( {'post': 'create', 'get': 'list', 'delete': 'delete'})), re_path( 
r"^apps/(?P{})/routes/(?P[-_\w]+)/attach/?$".format(settings.APP_URL_REGEX), diff --git a/rootfs/api/views.py b/rootfs/api/views.py index 4c189abdf..99dfeb2a2 100644 --- a/rootfs/api/views.py +++ b/rootfs/api/views.py @@ -403,7 +403,7 @@ def create_or_update(self, request, **kwargs): if service: for item in service.ports: if item["port"] == port: - return Response(status=status.HTTP_400_BAD_REQUEST, data="port is occupied") + return Response(status=status.HTTP_400_BAD_REQUEST, data={"detail": "port is occupied"}) # noqa http_status = status.HTTP_204_NO_CONTENT else: service = self.model(owner=app.owner, app=app, procfile_type=procfile_type) @@ -414,12 +414,13 @@ def create_or_update(self, request, **kwargs): def delete(self, request, **kwargs): port = self.get_serializer().validate_port(request.data.get('port')) + protocol = self.get_serializer().validate_protocol(request.data.get('protocol')) procfile_type = self.get_serializer().validate_procfile_type( request.data.get('procfile_type')) service = get_object_or_404(self.get_queryset(**kwargs), procfile_type=procfile_type) ports = [] for item in service.ports: - if item["port"] != port: + if item["port"] != port or item["protocol"] != protocol: ports.append(item) if len(ports) == 0: service.delete() @@ -848,26 +849,13 @@ class GatewayViewSet(AppResourceViewSet): model = models.gateway.Gateway filter_backends = [filters.SearchFilter] search_fields = ['^id', ] - service_class = serializers.ServiceSerializer serializer_class = serializers.GatewaySerializer - def _get_listeners(self, service, protocol, port): - if protocol in ("TLS", "HTTPS"): - for domain in models.domain.Domain.objects.filter(): - pass - listener_name = "%s-%s" % (str(service), port) - return [{ - "allowedRoutes": {"namespaces": {"from": "All"}}, - "name": listener_name, - "port": port, - "protocol": protocol, - }] - def create_or_update(self, request, *args, **kwargs): app = self.get_app() name = request.data['name'] - port = 
self.service_class.validate_port(request.data['port']) - protocol = self.service_class.validate_port(request.data['protocol']) + port = self.get_serializer().validate_port(request.data['port']) + protocol = self.get_serializer().validate_protocol(request.data['protocol']) gateway = app.gateway_set.filter(name=name).first() if not gateway: gateway = self.model(app=app, owner=app.owner, name=name) @@ -879,8 +867,8 @@ def create_or_update(self, request, *args, **kwargs): def delete(self, request, **kwargs): app = self.get_app() - port = self.service_class.validate_port(request.data.get('port')) - protocol = self.service_class.validate_port(request.data['protocol']) + port = self.get_serializer().validate_port(request.data.get('port')) + protocol = self.get_serializer().validate_protocol(request.data['protocol']) gateway = get_object_or_404(app.gateway_set, name=request.data.get("name")) ok, msg = gateway.remove(port, protocol) if not ok: @@ -897,7 +885,6 @@ class RouteViewSet(AppResourceViewSet): model = models.gateway.Route filter_backends = [filters.SearchFilter] search_fields = ['^id', ] - service_class = serializers.ServiceSerializer serializer_class = serializers.RouteSerializer def get(self, request, *args, **kwargs): @@ -908,33 +895,48 @@ def get(self, request, *args, **kwargs): def set(self, request, *args, **kwargs): app = self.get_app() route = get_object_or_404(self.model, app=app, name=kwargs['name']) - rules = json.loads(request.body.decode("utf8")) + rules = request.data + if isinstance(rules, str): + rules = json.loads(rules) route.rules = rules + ok, msg = route.check_rules() + if not ok: + return Response(status=status.HTTP_400_BAD_REQUEST, data=msg) route.save() return Response(status=status.HTTP_204_NO_CONTENT) def create(self, request, *args, **kwargs): app = self.get_app() - port = self.service_class.validate_port(request.data.get('port')) + port = self.get_serializer().validate_port(request.data.get('port')) app_settings = 
app.appsettings_set.latest() - procfile_type = self.service_class.validate_procfile_type( + procfile_type = self.get_serializer().validate_procfile_type( request.data.get('procfile_type')) + kind = self.get_serializer().validate_kind(request.data.get('kind')) + name = request.data['name'] + route = app.route_set.filter(name=name).first() + if route: + return Response(status=status.HTTP_400_BAD_REQUEST, data={ + "detail": f"this route {name} already exists"}) route = self.model( app=app, owner=app.owner, - kind=request.data['kind'], - name=request.data['name'], + kind=kind, + name=name, port=port, routable=app_settings.routable, procfile_type=procfile_type, ) - route.rules = {"backendRefs": route.get_backend_refs()} # default backends + route.rules = route.default_rules + if not route.rules[0]["backendRefs"]: + return Response(status=status.HTTP_400_BAD_REQUEST, data={ + "detail": "this route does not match services. please add service first." + }) route.save() return Response(status=status.HTTP_201_CREATED) def attach(self, request, *args, **kwargs): app = self.get_app() - port = self.service_class.validate_port(request.data.get('port')) + port = self.get_serializer().validate_port(request.data.get('port')) gateway_name = request.data['gateway'] route = get_object_or_404(self.model, app=app, name=kwargs['name']) attached, msg = route.attach(gateway_name, port) @@ -945,7 +947,7 @@ def attach(self, request, *args, **kwargs): def detach(self, request, *args, **kwargs): app = self.get_app() - port = self.service_class.validate_port(request.data.get('port')) + port = self.get_serializer().validate_port(request.data.get('port')) gateway_name = request.data['gateway'] route = get_object_or_404(self.model, app=app, name=kwargs['name']) detached, msg = route.detach(gateway_name, port) diff --git a/rootfs/scheduler/__init__.py b/rootfs/scheduler/__init__.py index a9b797f45..f375f2d2b 100644 --- a/rootfs/scheduler/__init__.py +++ b/rootfs/scheduler/__init__.py @@ -38,7 +38,6 
@@ def get_k8s_session(k8s_api_verify_tls): class KubeHTTPClient(object): - abstract = False api_version = 'v1' api_prefix = 'api' # ISO-8601 which is used by kubernetes @@ -56,7 +55,7 @@ def __init__(self, url, k8s_api_verify_tls=True): name = str(res.__name__).lower() # singular component = name + 's' # make plural # check if component has already been processed - if component in resource_mapping or res.abstract: + if component in resource_mapping: continue # get past recursion problems in case of self reference diff --git a/rootfs/scheduler/mock.py b/rootfs/scheduler/mock.py index bf1742e71..3d8eed87f 100644 --- a/rootfs/scheduler/mock.py +++ b/rootfs/scheduler/mock.py @@ -83,6 +83,7 @@ def _acquire(self): 'horizontalpodautoscalers', 'scale', 'resourcequotas', 'ingresses', 'persistentvolumeclaims', 'serviceinstances', 'servicebindings', 'limitranges', 'gateways', 'httproutes', 'tcproutes', 'udproutes', + 'issuers', 'certificates' ] @@ -752,7 +753,6 @@ def put(request, context): subresource, resource_type, url = is_subresource(resource_type, url) if subresource != resource_type: cache.set(original_url, request.json(), None) - item = cache.get(url) if item is None: context.status_code = 404 @@ -792,7 +792,6 @@ def put(request, context): # Update the individual resource cache.set(url, data, None) - if resource_type in ['replicationcontrollers', 'replicasets']: upsert_pods(data, url) elif resource_type == 'deployments': diff --git a/rootfs/scheduler/resources/cert_manager.py b/rootfs/scheduler/resources/cert_manager.py index fc6608df6..a3c495392 100644 --- a/rootfs/scheduler/resources/cert_manager.py +++ b/rootfs/scheduler/resources/cert_manager.py @@ -6,7 +6,7 @@ class Issuer(Resource): api_prefix = 'apis' api_version = 'cert-manager.io/v1' - def get(self, namespace, name=None, ignore_exception=False, **kwargs): + def get(self, namespace, name=None, ignore_exception=True, **kwargs): """ Fetch a single Issuer or a list of Issuers """ @@ -51,7 +51,7 @@ def 
manifest(self, namespace, name, **kwargs): if "version" in kwargs: data["metadata"]["resourceVersion"] = kwargs.get("version") if "key_id" in kwargs and "key_secret" in kwargs: - data["metadata"]["acme"]["externalAccountBinding"] = { + data["spec"]["acme"]["externalAccountBinding"] = { "keyID": kwargs["key_id"], "keySecretRef": { "key": "secret", @@ -60,38 +60,38 @@ def manifest(self, namespace, name, **kwargs): } return data - def create(self, namespace, name, data, secret_type='Opaque', labels={}): - manifest = self.manifest(namespace, name, data, secret_type, labels) - url = self.api("/namespaces/{}/secrets", namespace) + def create(self, namespace, name, ignore_exception=True, **kwargs): + manifest = self.manifest(namespace, name, **kwargs) + url = self.api("/namespaces/{}/issuers", namespace) response = self.http_post(url, json=manifest) - if self.unhealthy(response.status_code): + if not ignore_exception and self.unhealthy(response.status_code): raise KubeHTTPException( response, - 'failed to create Secret "{}" in Namespace "{}"', name, namespace + 'failed to create issuer "{}" in Namespace "{}"', name, namespace ) return response - def update(self, namespace, name, data, secret_type='Opaque', labels={}): - manifest = self.manifest(namespace, name, data, secret_type, labels) - url = self.api("/namespaces/{}/secrets/{}", namespace, name) + def put(self, namespace, name, ignore_exception=True, **kwargs): + manifest = self.manifest(namespace, name, **kwargs) + url = self.api("/namespaces/{}/issuers/{}", namespace, name) response = self.http_put(url, json=manifest) - if self.unhealthy(response.status_code): + if not ignore_exception and self.unhealthy(response.status_code): raise KubeHTTPException( response, - 'failed to update Secret "{}" in Namespace "{}"', + 'failed to update issuer "{}" in Namespace "{}"', name, namespace ) return response def delete(self, namespace, name, ignore_exception=False): - url = self.api("/namespaces/{}/secrets/{}", namespace, name) 
+ url = self.api("/namespaces/{}/issuers/{}", namespace, name) response = self.http_delete(url) if not ignore_exception and self.unhealthy(response.status_code): raise KubeHTTPException( response, - 'delete Secret "{}" in Namespace "{}"', name, namespace + 'delete issuer "{}" in Namespace "{}"', name, namespace ) return response diff --git a/rootfs/scheduler/resources/gateway.py b/rootfs/scheduler/resources/gateway.py index 05104d1ee..9e4188dea 100644 --- a/rootfs/scheduler/resources/gateway.py +++ b/rootfs/scheduler/resources/gateway.py @@ -70,9 +70,7 @@ def delete(self, namespace, name, ignore_exception=True): return response -class BaseRoute(Resource): - abstract = True - kind = "BaseRoute" +class Route(Resource): api_prefix = 'apis' api_version = 'gateway.networking.k8s.io/v1beta1' @@ -140,8 +138,19 @@ def delete(self, namespace, name, ignore_exception=True): return response -class HTTPRoute(BaseRoute): +class TCPRoute(Route): + kind = "TCPRoute" + api_version = 'gateway.networking.k8s.io/v1alpha2' + + +class UDPRoute(Route): + kind = "UDPRoute" + api_version = 'gateway.networking.k8s.io/v1alpha2' + + +class HTTPRoute(Route): kind = "HTTPRoute" + api_version = 'gateway.networking.k8s.io/v1beta1' def manifest(self, namespace, name, **kwargs): data = super().manifest(namespace, name, **kwargs) @@ -150,15 +159,23 @@ def manifest(self, namespace, name, **kwargs): return data -class GRPCRoute(HTTPRoute): +class GRPCRoute(Route): kind = "GRPCRoute" + api_version = 'gateway.networking.k8s.io/v1beta1' + def manifest(self, namespace, name, **kwargs): + data = super().manifest(namespace, name, **kwargs) + if "hostnames" in kwargs: + data["spec"]["hostnames"] = kwargs["hostnames"] + return data -class TCPRoute(BaseRoute): - kind = "TCPRoute" - api_version = 'gateway.networking.k8s.io/v1alpha2' +class TLSRoute(Route): + kind = "TLSRoute" + api_version = 'gateway.networking.k8s.io/v1alpha2' -class UDPRoute(BaseRoute): - kind = "UDPRoute" - api_version = 
'gateway.networking.k8s.io/v1alpha2' + def manifest(self, namespace, name, **kwargs): + data = super().manifest(namespace, name, **kwargs) + if "hostnames" in kwargs: + data["spec"]["hostnames"] = kwargs["hostnames"] + return data diff --git a/rootfs/scheduler/resources/ingress/__init__.py b/rootfs/scheduler/resources/ingress/__init__.py deleted file mode 100644 index 388526a72..000000000 --- a/rootfs/scheduler/resources/ingress/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .base import IngressClass, WildcardPathIngress -from .nginx import NginxIngress -from .traefik import TraefikIngress - - -# registry ingress class by controller -IngressClass.register("k8s.io/ingress-gce", WildcardPathIngress) -IngressClass.register("ingress.k8s.aws/alb", WildcardPathIngress) -IngressClass.register("traefik.io/ingress-controller", TraefikIngress) -IngressClass.register("k8s.io/ingress-nginx", NginxIngress) diff --git a/rootfs/scheduler/resources/ingress/base.py b/rootfs/scheduler/resources/ingress/base.py deleted file mode 100644 index be45dc094..000000000 --- a/rootfs/scheduler/resources/ingress/base.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -from scheduler.exceptions import KubeHTTPException -from scheduler.resources import Resource - -MEM_REQUEST_BODY_BYTES = int(os.environ.get('MEM_REQUEST_BODY_BYTES', 1024)) -MAX_REQUEST_BODY_BYTES = int(os.environ.get('MAX_REQUEST_BODY_BYTES', 1024 * 1024 * 1024)) -MEM_RESPONSE_BODY_BYTES = int(os.environ.get('MEM_RESPONSE_BODY_BYTES', 1024 * 1024)) -MAX_RESPONSE_BODY_BYTES = int(os.environ.get('MAX_RESPONSE_BODY_BYTES', 1024 * 1024 * 1024)) - - -class BaseIngress(Resource): - abstract = True - api_version = 'networking.k8s.io/v1' - api_prefix = 'apis' - short_name = 'ingress' - ingress_path = "/" - ingress_class = None - - def manifest(self, namespace, ingress, **kwargs): - hosts, tls = kwargs.pop("hosts", None), kwargs.pop("tls", None) - version = kwargs.pop("version", None) - data = { - "kind": "Ingress", - "apiVersion": 
self.api_version, - "metadata": { - "name": ingress, - "namespace": namespace, - "annotations": { - "kubernetes.io/tls-acme": "true" - } - }, - "spec": { - "ingressClassName": self.ingress_class - } - } - if hosts: - data["spec"]["rules"] = [{ - "host": host, - "http": { - "paths": [ - { - "path": self.ingress_path, - "pathType": "Prefix", - "backend": { - "service": { - "name": ingress, - "port": { - "number": 80 - } - } - } - } - ] - } - } for host in hosts] - if tls: - data["spec"]["tls"] = tls - if version: - data["metadata"]["resourceVersion"] = version - return data - - def get(self, namespace, ingress=None, ignore_exception=False, **kwargs): - """ - Fetch a single Ingress or a list of Ingresses - """ - if ingress is not None: - url = self.api("/namespaces/{}/ingresses/{}", namespace, ingress) - message = 'get Ingress ' + ingress - else: - url = self.api("/namespaces/{}/ingresses", namespace) - message = 'get Ingresses' - - response = self.http_get(url, params=self.query_params(**kwargs)) - if not ignore_exception and self.unhealthy(response.status_code): - raise KubeHTTPException(response, message) - - return response - - def create(self, namespace, ingress, ignore_exception=False, **kwargs): - url = self.api("/namespaces/{}/ingresses", namespace) - data = self.manifest(namespace, ingress, **kwargs) - response = self.http_post(url, json=data) - - if not ignore_exception and self.unhealthy(response.status_code): - raise KubeHTTPException(response, "create Ingress {}".format(namespace)) - - return response - - def put(self, namespace, ingress, version, ignore_exception=False, **kwargs): - url = self.api("/namespaces/{}/ingresses/{}", namespace, ingress) - kwargs["version"] = version - data = self.manifest(namespace, ingress, **kwargs) - response = self.http_put(url, json=data) - - if not ignore_exception and self.unhealthy(response.status_code): - raise KubeHTTPException(response, "put Ingress {}".format(namespace)) - - return response - - def delete(self, 
namespace, ingress, ignore_exception=True): - url = self.api("/namespaces/{}/ingresses/{}", namespace, ingress) - response = self.http_delete(url) - if not ignore_exception and self.unhealthy(response.status_code): - raise KubeHTTPException(response, 'delete Ingress "{}"', namespace) - - return response - - -class IngressClass(Resource): - - short_name = 'ingress' - ingress_class_map = { - "default": BaseIngress - } - - def get(self, ingress_class, ignore_exception=True): - response = self.http_get(f"/apis/networking.k8s.io/v1/ingressclasses/{ingress_class}") - if not ignore_exception and self.unhealthy(response.status_code): - raise KubeHTTPException(response, 'get IngressClasses "{}"', ingress_class) - - return response - - def __call__(self, ingress_class): - response = self.get(ingress_class) - controller = "default" - if response.status_code == 200: - controller = response.json()["spec"]["controller"] - ingress_cls = self.ingress_class_map.get(controller, self.ingress_class_map["default"]) - ingress_cls.ingress_class = ingress_class - return ingress_cls(self.url, self.k8s_api_verify_tls) - - @classmethod - def register(cls, ingress_name, ingress_cls): - cls.ingress_class_map[ingress_name] = ingress_cls - - -class WildcardPathIngress(BaseIngress): - ingress_path = "/*" diff --git a/rootfs/scheduler/resources/ingress/nginx.py b/rootfs/scheduler/resources/ingress/nginx.py deleted file mode 100644 index c5babaad0..000000000 --- a/rootfs/scheduler/resources/ingress/nginx.py +++ /dev/null @@ -1,28 +0,0 @@ -from .base import ( - BaseIngress, - MEM_REQUEST_BODY_BYTES, - MAX_REQUEST_BODY_BYTES, - MEM_RESPONSE_BODY_BYTES, - MAX_RESPONSE_BODY_BYTES, -) - - -class NginxIngress(BaseIngress): - - def manifest(self, namespace, ingress, **kwargs): - data = BaseIngress.manifest(self, namespace, ingress, **kwargs) - data["metadata"]["annotations"].update({ - "nginx.ingress.kubernetes.io/client-body-buffer-size": MEM_REQUEST_BODY_BYTES, - 
"nginx.ingress.kubernetes.io/proxy-body-size": MAX_REQUEST_BODY_BYTES, - "nginx.ingress.kubernetes.io/proxy-buffering": "on", - "nginx.ingress.kubernetes.io/proxy-buffer-size": MEM_RESPONSE_BODY_BYTES, - "nginx.ingress.kubernetes.io/proxy-max-temp-file-size": ( - MAX_RESPONSE_BODY_BYTES - MEM_RESPONSE_BODY_BYTES - ), - }) - if "ssl_redirect" in kwargs: - ssl_redirect = kwargs.pop("ssl_redirect") - data["metadata"]["annotations"].update({ - "nginx.ingress.kubernetes.io/ssl-redirect": ssl_redirect - }) - return data diff --git a/rootfs/scheduler/resources/ingress/traefik.py b/rootfs/scheduler/resources/ingress/traefik.py deleted file mode 100644 index 28c8cb3c0..000000000 --- a/rootfs/scheduler/resources/ingress/traefik.py +++ /dev/null @@ -1,141 +0,0 @@ -from scheduler.resources import Resource -from scheduler.exceptions import KubeHTTPException -from .base import ( - BaseIngress, - MEM_REQUEST_BODY_BYTES, - MAX_REQUEST_BODY_BYTES, - MEM_RESPONSE_BODY_BYTES, - MAX_RESPONSE_BODY_BYTES, -) - - -class BaseMiddleware(Resource): - abstract = True - api_version = 'traefik.containo.us/v1alpha1' - api_prefix = 'apis' - name_suffix = '' - - def fullname(self, base_name): - return f"{base_name}{self.name_suffix}" - - def manifest(self, name, resource_version=None): - manifest = { - "apiVersion": self.api_version, - "kind": "Middleware", - "metadata": { - "name": name - }, - } - if resource_version: - manifest["metadata"]["resourceVersion"] = resource_version - return manifest - - def get(self, namespace, name=None, **kwargs): - """ - Fetch a single Middleware or a list of Middlewares - """ - if name is not None: - url = self.api("/namespaces/{}/middlewares/{}", namespace, name) - else: - url = self.api("/namespaces/{}/middlewares", namespace) - return self.http_get(url, params=self.query_params(**kwargs)) - - def create(self, namespace, ignore_exception=False, **kwargs): - url = self.api("/namespaces/{}/middlewares", namespace) - data = self.manifest(**kwargs) - response = 
self.http_post(url, json=data) - if not ignore_exception and self.unhealthy(response.status_code): - raise KubeHTTPException(response, "create middleware {}".format(namespace)) - return response - - def put(self, namespace, name, ignore_exception=False, **kwargs): - url = self.api("/namespaces/{}/middlewares/{}", namespace, name) - if kwargs.get("resource_version") is None: - response = self.get(namespace, name) - if self.unhealthy(response.status_code): - raise KubeHTTPException(response, "get middleware {}".format(name)) - resource_version = response.json()["metadata"]["resourceVersion"] - kwargs["resource_version"] = resource_version - data = self.manifest(name, **kwargs) - response = self.http_put(url, json=data) - if not ignore_exception and self.unhealthy(response.status_code): - raise KubeHTTPException(response, "put middleware {}".format(namespace)) - return response - - def delete(self, namespace, name, ignore_exception=True): - url = self.api("/namespaces/{}/middlewares/{}", namespace, name) - response = self.http_delete(url) - if not ignore_exception and self.unhealthy(response.status_code): - raise KubeHTTPException(response, 'delete middlewares "{}"', namespace) - - return response - - -class BufferingMiddleware(BaseMiddleware): - name_suffix = "-buffering" - - def manifest(self, name, resource_version=None): - data = super().manifest(name, resource_version) - data.update({ - "spec": { - "buffering": { - "memRequestBodyBytes": MEM_REQUEST_BODY_BYTES, - "maxRequestBodyBytes": MAX_REQUEST_BODY_BYTES, - "memResponseBodyBytes": MEM_RESPONSE_BODY_BYTES, - "maxResponseBodyBytes": MAX_RESPONSE_BODY_BYTES, - } - } - }) - return data - - -class RedirectSchemeMiddleware(BaseMiddleware): - name_suffix = "-redirect-scheme" - - def manifest(self, name, resource_version=None): - data = super().manifest(name, resource_version) - data.update({ - "spec": { - "redirectScheme": { - "scheme": "https", - "permanent": True - } - } - }) - return data - - -class 
TraefikIngress(BaseIngress): - - def __init__(self, url, k8s_api_verify_tls=True): - super().__init__(url, k8s_api_verify_tls) - self.middlewares = { - "buffering": BufferingMiddleware(url, k8s_api_verify_tls), - "ssl_redirect": RedirectSchemeMiddleware(url, k8s_api_verify_tls), - } - - def manifest(self, namespace, ingress, **kwargs): - data = BaseIngress.manifest(self, namespace, ingress, **kwargs) - middlewares = [] - for middleware in self.middlewares.keys(): - if middleware == "buffering" or (middleware in kwargs and kwargs[middleware]): - name = self.middlewares[middleware].fullname(ingress) - middlewares.append(f"{namespace}-{name}@kubernetescrd") - data["metadata"]["annotations"].update({ - "traefik.ingress.kubernetes.io/router.middlewares": ",".join(middlewares) - }) - return data - - def create(self, namespace, ingress, **kwargs): - response = super().create(ingress, namespace, **kwargs) - for middleware in self.middlewares.keys(): - name = self.middlewares[middleware].fullname(ingress) - self.middlewares[middleware].create(namespace, name=name) - return response - - def delete(self, namespace, ingress): - response = super().delete(namespace, ingress) - for middleware in self.middlewares.keys(): - name = self.middlewares[middleware].fullname(ingress) - self.middlewares[middleware].delete(namespace, name) - return response diff --git a/rootfs/scheduler/resources/pod.py b/rootfs/scheduler/resources/pod.py index eee8db19a..f8bd82620 100644 --- a/rootfs/scheduler/resources/pod.py +++ b/rootfs/scheduler/resources/pod.py @@ -326,7 +326,7 @@ def _set_health_checks(self, container, env, **kwargs): healthchecks['livenessProbe']['httpGet']['port'] = env['PORT'] container.update(healthchecks) elif kwargs.get('routable', False): - self._default_readiness_probe(container, kwargs.get('build_type'), env.get('PORT', 5000)) # noqa + container.update(self._default_container_readiness_probe(env.get('PORT', 5000))) @staticmethod def _set_lifecycle_hooks(container, env, 
**kwargs): @@ -360,13 +360,6 @@ def _set_lifecycle_hooks(container, env, **kwargs): } container["lifecycle"] = dict(lifecycle) - def _default_readiness_probe(self, container, build_type, port=5000): - # Update only the application container with the health check - if build_type == "buildpack": - container.update(self._default_container_readiness_probe(port)) - elif port: - container.update(self._default_container_readiness_probe(port)) - @staticmethod def _default_container_readiness_probe(port, delay=5, timeout=5, period_seconds=5, success_threshold=1, failure_threshold=1): diff --git a/rootfs/scheduler/resources/secret.py b/rootfs/scheduler/resources/secret.py index 871a4b6da..e06a6b4f5 100644 --- a/rootfs/scheduler/resources/secret.py +++ b/rootfs/scheduler/resources/secret.py @@ -90,6 +90,20 @@ def create(self, namespace, name, data, secret_type='Opaque', labels={}): return response + def patch(self, namespace, name, data, secret_type='Opaque', labels={}): + manifest = self.manifest(namespace, name, data, secret_type, labels) + url = self.api("/namespaces/{}/secrets/{}", namespace, name) + response = self.http_patch(url, json=manifest, headers={ + "Content-Type": "application/merge-patch+json"}) + if self.unhealthy(response.status_code): + raise KubeHTTPException( + response, + 'failed to update Secret "{}" in Namespace "{}"', + name, namespace + ) + + return response + def update(self, namespace, name, data, secret_type='Opaque', labels={}): manifest = self.manifest(namespace, name, data, secret_type, labels) url = self.api("/namespaces/{}/secrets/{}", namespace, name) diff --git a/rootfs/scheduler/resources/service.py b/rootfs/scheduler/resources/service.py index 0d414d0f9..28f22e360 100644 --- a/rootfs/scheduler/resources/service.py +++ b/rootfs/scheduler/resources/service.py @@ -21,13 +21,15 @@ def manifest(self, namespace, name, **kwargs): 'type': kwargs.get("type", "ClusterIP"), 'ports': kwargs.get("ports"), 'selector': { - 'app': name, + 'app': 
namespace, 'heritage': 'drycc' } } } if "version" in kwargs: data["metadata"]["resourceVersion"] = kwargs.get("version") + if "procfile_type" in kwargs: + data["spec"]["selector"]['type'] = kwargs.get("procfile_type") return data def get(self, namespace, name=None, **kwargs): diff --git a/rootfs/scheduler/tests/test_cert_manager.py b/rootfs/scheduler/tests/test_cert_manager.py new file mode 100644 index 000000000..4ceeade0d --- /dev/null +++ b/rootfs/scheduler/tests/test_cert_manager.py @@ -0,0 +1,57 @@ +""" +Unit tests for the Drycc gateway module. +""" +from scheduler.tests import TestCase + + +class IssuerTest(TestCase): + """Tests scheduler gateway calls""" + + def create_issuer(self, namespace, name): + self.scheduler.ns.create(namespace) + data = { + "parent_refs": [{ + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": "gateway_name", + }], + "server": "https://acme-v02.api.letsencrypt.org/directory", + "key_id": "key_id", + "key_secret": "key_secret", + } + return self.scheduler.issuer.create(namespace, name, **data) + + def test_issuer_get(self): + response = self.create_issuer("test-issuer", "test-issuer") + self.assertEqual(response.status_code, 201) + response = self.scheduler.issuer.get("test-issuer", "test-issuer") + self.assertEqual( + response.json()["spec"]["acme"]["server"], + "https://acme-v02.api.letsencrypt.org/directory" + ) + + def test_issuer_put(self): + response = self.create_issuer("test-issuer", "test-issuer") + self.assertEqual(response.status_code, 201) + data1 = { + "parent_refs": [{ + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": "gateway_name", + }], + "server": "https://test.test.com/directory", + "key_id": "key_id", + "key_secret": "key_secret", + } + self.scheduler.issuer.put("test-issuer", "test-issuer", **data1) + response = self.scheduler.issuer.get("test-issuer", "test-issuer", ignore_exception=True) + self.assertEqual( + response.json()["spec"]["acme"]["server"], 
"https://test.test.com/directory") + + def test_issuer_delete(self): + response = self.create_issuer("test-issuer", "test-issuer") + self.assertEqual(response.status_code, 201) + response = self.scheduler.issuer.delete("test-issuer", "test-issuer") + self.assertEqual(response.status_code, 200) + response = self.scheduler.issuer.get("test-issuer", "test-issuer", ignore_exception=True) + self.assertEqual(response.status_code, 404) diff --git a/rootfs/scheduler/tests/test_gateway.py b/rootfs/scheduler/tests/test_gateway.py index 504fb40f2..72d00c2cf 100644 --- a/rootfs/scheduler/tests/test_gateway.py +++ b/rootfs/scheduler/tests/test_gateway.py @@ -69,29 +69,15 @@ def test_delete_gateway(self): class HTTPRouteTest(TestCase): """Tests scheduler gateway calls""" - def create_http_route(self, name="test-gateway", weight=0.0): + def create_http_route(self, name="test-gateway"): self.scheduler.ns.create(name) - return self.scheduler.httproutes.create( + return self.scheduler.httproute.create( name, name, port=5000, procfile_type="web", - weight=weight - ) - - def test_create_http_route_1(self): - response = self.create_http_route(weight=0.9) - for backend in response.json()['spec']['rules'][0]['backendRefs']: - if backend["name"].endswith("canary"): - self.assertEqual(backend["weight"], 9) - else: - self.assertEqual(backend["weight"], 991) - - def test_create_http_route_2(self): - response = self.create_http_route(name="test-gateway1", weight=0.0) - self.assertEqual( - len(response.json()["spec"]["rules"][0]["backendRefs"]), - 1 + rules={}, + parent_refs={}, ) def test_patch_http_route(self): @@ -112,20 +98,21 @@ def test_patch_http_route(self): ] } ] - response = self.scheduler.httproutes.patch( + response = self.scheduler.httproute.patch( "test-gateway", "test-gateway", port=8000, procfile_type="cmd", version=1, - rules=rules + rules=rules, + parent_refs={}, ) self.assertEqual(response.json()["spec"]["rules"], rules) def test_delete_http_route(self): - 
self.test_create_http_route_2() - self.scheduler.httproutes.delete("test-gateway", "test-gateway") - response = self.scheduler.httproutes.get( + self.test_patch_http_route() + self.scheduler.httproute.delete("test-gateway", "test-gateway") + response = self.scheduler.httproute.get( "test-gateway", "test-gateway", ignore_exception=True @@ -136,29 +123,15 @@ def test_delete_http_route(self): class TCPRouteTest(TestCase): """Tests scheduler gateway calls""" - def create_tcp_route(self, name="test-tcp-route", weight=0.0): + def create_tcp_route(self, name="test-tcp-route"): self.scheduler.ns.create(name) return self.scheduler.tcproutes.create( name, name, port=5000, procfile_type="celery", - weight=weight - ) - - def test_create_tcp_route_1(self): - response = self.create_tcp_route(weight=0.9) - for backend in response.json()['spec']['rules'][0]['backendRefs']: - if backend["name"].endswith("canary"): - self.assertEqual(backend["weight"], 9) - else: - self.assertEqual(backend["weight"], 991) - - def test_create_tcp_route_2(self): - response = self.create_tcp_route(name="test-tcp-route1", weight=0.0) - self.assertEqual( - len(response.json()["spec"]["rules"][0]["backendRefs"]), - 1 + rules={}, + parent_refs={}, ) def test_patch_tcp_route(self): @@ -185,12 +158,13 @@ def test_patch_tcp_route(self): port=8000, procfile_type="cmd", version=1, - rules=rules + rules=rules, + parent_refs={}, ) self.assertEqual(response.json()["spec"]["rules"], rules) def test_delete_tcp_route(self): - self.test_create_tcp_route_2() + self.test_patch_tcp_route() self.scheduler.tcproutes.delete("test-tcp-route", "test-tcp-route") response = self.scheduler.tcproutes.get( "test-tcp-route", @@ -203,29 +177,15 @@ def test_delete_tcp_route(self): class UDPRouteTest(TestCase): """Tests scheduler gateway calls""" - def create_udp_route(self, name="test-udp-route", weight=0.0): + def create_udp_route(self, name="test-udp-route"): self.scheduler.ns.create(name) return self.scheduler.udproutes.create( 
name, name, port=5000, procfile_type="celery", - weight=weight - ) - - def test_create_udp_route_1(self): - response = self.create_udp_route(weight=0.9) - for backend in response.json()['spec']['rules'][0]['backendRefs']: - if backend["name"].endswith("canary"): - self.assertEqual(backend["weight"], 9) - else: - self.assertEqual(backend["weight"], 991) - - def test_create_udp_route_2(self): - response = self.create_udp_route(name="test-udp-route1", weight=0.0) - self.assertEqual( - len(response.json()["spec"]["rules"][0]["backendRefs"]), - 1 + rules={}, + parent_refs={} ) def test_patch_udp_route(self): @@ -252,12 +212,13 @@ def test_patch_udp_route(self): port=8000, procfile_type="cmd", version=1, - rules=rules + rules=rules, + parent_refs={} ) self.assertEqual(response.json()["spec"]["rules"], rules) def test_delete_udp_route(self): - self.test_create_udp_route_2() + self.test_patch_udp_route() self.scheduler.udproutes.delete("test-udp-route", "test-udp-route") response = self.scheduler.udproutes.get( "test-udp-route", diff --git a/rootfs/scheduler/tests/test_ingress.py b/rootfs/scheduler/tests/test_ingress.py deleted file mode 100644 index 568c13f54..000000000 --- a/rootfs/scheduler/tests/test_ingress.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Unit tests for the Drycc ingress module. 
- -Run the tests with './manage.py test ingress' -""" -from scheduler import KubeHTTPException -from scheduler.tests import TestCase - - -class IngressTest(TestCase): - """Tests scheduler ingress calls""" - - def test_create_ingress(self): - # Ingress assumes that the namespace and ingress name are always the same - self.scheduler.ns.create("test-ingress") - self.scheduler.ingress("default").create( - "test-ingress", "nginx", hosts=["test-ingress"], tls=[]) - - def test_get_ingresses(self): - response = self.scheduler.ingress("default").get("test-ingress") - data = response.json() - self.assertEqual(response.status_code, 200, data) - self.assertIn('items', data) - - def test_get_ingress(self): - with self.assertRaises( - KubeHTTPException, - msg="failed to get Ingress doesnotexist: 404 Not Found" - ): - self.scheduler.ingress("default").get('doesnotexist', 'doesnotexist') - - self.scheduler.ns.create("test-ingress-create") - self.scheduler.ingress("default").create( - "test-ingress-create", "test-ingress-create", hosts=["test-ingress-create", ]) - response = self.scheduler.ingress("default").get( - "test-ingress-create", "test-ingress-create") - data = response.json() - - self.assertEqual(response.status_code, 200, data) - self.assertEqual(data['apiVersion'], 'networking.k8s.io/v1') - self.assertEqual(data['kind'], 'Ingress') - - def test_delete_failure(self): - # test failure - with self.assertRaises( - KubeHTTPException, - msg="failed to delete Ingress doesnotexist: 404 Not Found" - ): - self.scheduler.ns.delete('doesnotexist') - - def test_delete_namespace(self): - self.scheduler.ns.create("test-ingress-delete") - self.scheduler.ingress("default").create( - "test-ingress-delete", "test-ingress-delete", hosts=["test-ingress-delete", ]) - response = self.scheduler.ingress("default").delete( - "test-ingress-delete", "test-ingress-delete") - self.assertEqual(response.status_code, 200, response.json()) diff --git a/rootfs/scheduler/tests/test_services.py 
b/rootfs/scheduler/tests/test_services.py index 13b68b7e1..e769a954b 100644 --- a/rootfs/scheduler/tests/test_services.py +++ b/rootfs/scheduler/tests/test_services.py @@ -16,11 +16,11 @@ def create(self, port=5000, protocol="TCP", target_port=5000): Helper function to create and verify a service on the namespace """ name = generate_random_name() - service = self.scheduler.svc.create(self.namespace, name, **{ + service = self.scheduler.svc.create(self.namespace, name, ports=[{ "port": port, "protocol": protocol, - "target_port": target_port, - }) + "targetPort": target_port, + }]) data = service.json() self.assertEqual(service.status_code, 201, data) self.assertEqual(data['metadata']['name'], name) @@ -52,17 +52,21 @@ def test_update_failure(self): def test_patch(self): name = self.create() expect = { - "port": 6000, - "protocol": "UDP", - "target_port": "6000", + "ports": [{ + "port": 6000, + "protocol": "UDP", + "targetPort": "6000", + }], "version": 1, } self.scheduler.svc.patch(self.namespace, name, **expect) service = self.scheduler.svc.get(self.namespace, name).json() self.assertEqual(expect, { - "port": service['spec']['ports'][0]['port'], - "protocol": service['spec']['ports'][0]['protocol'], - "target_port": service['spec']['ports'][0]['targetPort'], + "ports": [{ + "port": service['spec']['ports'][0]['port'], + "protocol": service['spec']['ports'][0]['protocol'], + "targetPort": service['spec']['ports'][0]['targetPort'], + }], "version": service['metadata']['resourceVersion'] }) @@ -72,7 +76,11 @@ def test_update(self): service = self.scheduler.svc.get(self.namespace, name).json() self.assertEqual(service['spec']['ports'][0]['targetPort'], 5000, service) - response = self.scheduler.svc.patch(self.namespace, name, target_port=5001) + response = self.scheduler.svc.patch(self.namespace, name, ports=[{ + "port": service['spec']['ports'][0]['port'], + "protocol": service['spec']['ports'][0]['protocol'], + "targetPort": 5001, + }]) 
self.assertEqual(response.status_code, 200, response.json()) service = self.scheduler.svc.get(self.namespace, name).json()