Support correctly updating the status of Knative ClusterIngresses, since Knative 0.8.0 requires this and 0.7.1 wants it. (Knative Ingresses aren't quite working yet.)

Luke Shumaker, 1 month ago
parent commit 82dae1e810

6 changed files with 72 additions and 6 deletions:

1. .dockerignore (+1 -0)
2. Dockerfile (+2 -0)
3. Makefile (+12 -3)
4. ambassador/ambassador/ir/ir.py (+37 -3)
5. ambassador/ambassador_diag/diagd.py (+19 -0)
6. ambassador/watch_hook.py (+1 -0)
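
In brief (a reader's summary, not part of the commit itself): ir.py records which Knative resources produced Mappings, and diagd.py then shells out to the new kubestatus binary to mark those resources ready. The shape of the new IR field, sketched with a hypothetical resource name:

    # Sketch of IR.k8s_status_updates (values abbreviated, name hypothetical).
    # ir.py fills this in; diagd.py drains it after each reconfigure.
    k8s_status_updates = {
        'helloworld-go': (                                     # resource name
            'clusteringress.networking.internal.knative.dev',  # kind, as kubestatus expects it
            {'conditions': [{'type': 'Ready', 'status': 'True',
                             'lastTransitionTime': '2019-08-30T12:00:00Z'}]},
        ),
    }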

.dockerignore (+1 -0)

@@ -4,4 +4,5 @@
 !demo/
 !releng/
 !watt
+!kubestatus
 !envoy-bin/

Dockerfile (+2 -0)

@@ -90,5 +90,7 @@ RUN chmod 755 entrypoint.sh grab-snapshots.py kick_ads.sh kubewatch.py post_upda
 # XXX Move to base image
 COPY watt .
 RUN chmod 755 watt
+COPY kubestatus .
+RUN chmod 755 kubestatus

 ENTRYPOINT [ "./entrypoint.sh" ]

Makefile (+12 -3)

@@ -143,6 +143,10 @@ KAT_BACKEND_RELEASE = 1.5.0
 WATT ?= watt
 WATT_VERSION ?= 0.6.0

+# Allow overriding which kubestatus we use.
+KUBESTATUS ?= kubestatus
+KUBESTATUS_VERSION ?= 0.7.0-preview
+
 # "make" by itself doesn't make the website. It takes too long and it doesn't
 # belong in the inner dev loop.
 all:
@@ -170,7 +174,7 @@ clean: clean-test
 	rm -f envoy-build-image.txt

 clobber: clean
-	-rm -rf watt
+	-rm -rf $(WATT) $(KUBESTATUS)
 	-$(if $(filter-out -,$(ENVOY_COMMIT)),rm -rf envoy envoy-src)
 	-rm -rf docs/node_modules
 	-rm -rf venv && echo && echo "Deleted venv, run 'deactivate' command if your virtualenv is activated" || true
@@ -360,7 +364,7 @@ docker-update-base:
 	$(MAKE) docker-push-base-images

 ambassador-docker-image: ambassador.docker
-ambassador.docker: Dockerfile base-go.docker base-py.docker $(WATT) $(WRITE_IFCHANGED) ambassador/ambassador/VERSION.py FORCE
+ambassador.docker: Dockerfile base-go.docker base-py.docker $(WATT) $(KUBESTATUS) $(WRITE_IFCHANGED) ambassador/ambassador/VERSION.py FORCE
 	docker build --build-arg BASE_GO_IMAGE=$(BASE_GO_IMAGE) --build-arg BASE_PY_IMAGE=$(BASE_PY_IMAGE) $(DOCKER_OPTS) -t $(AMBASSADOR_DOCKER_IMAGE) .
 	@docker image inspect $(AMBASSADOR_DOCKER_IMAGE) --format='{{.Id}}' | $(WRITE_IFCHANGED) $@

@@ -412,6 +416,11 @@ $(WATT): $(var.)WATT_VERSION
 	curl -o $(WATT) https://s3.amazonaws.com/datawire-static-files/watt/$(WATT_VERSION)/linux/amd64/watt
 	chmod go-w,a+x $(WATT)

+# This is for the docker image, so we don't use the current arch; we hardcode linux/amd64.
+$(KUBESTATUS): $(var.)KUBESTATUS_VERSION
+	curl -o $(KUBESTATUS) https://s3.amazonaws.com/datawire-static-files/kubestatus/$(KUBESTATUS_VERSION)/linux/amd64/kubestatus
+	chmod go-w,a+x $(KUBESTATUS)
+
 CLAIM_FILE=kubernaut-claim.txt
 CLAIM_NAME=$(shell cat $(CLAIM_FILE))

@@ -439,7 +448,7 @@ $(KAT_CLIENT): venv/kat-backend-$(KAT_BACKEND_RELEASE).tar.gz $(var.)KAT_BACKEND
 	cd venv && tar -xzf $(<F) kat-backend-$(KAT_BACKEND_RELEASE)/client/bin/client_$(GOOS)_$(GOARCH)
 	install -m0755 venv/kat-backend-$(KAT_BACKEND_RELEASE)/client/bin/client_$(GOOS)_$(GOARCH) $(CURDIR)/$(KAT_CLIENT)

-setup-develop: venv $(KAT_CLIENT) $(TELEPROXY) $(KUBERNAUT) $(WATT) version
+setup-develop: venv $(KAT_CLIENT) $(TELEPROXY) $(KUBERNAUT) $(WATT) $(KUBESTATUS) version

 cluster.yaml: $(CLAIM_FILE)
 ifeq ($(USE_KUBERNAUT), true)
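
Usage note (not part of the commit): because KUBESTATUS and KUBESTATUS_VERSION are "?=" assignments, both can be overridden from the environment or the make command line; e.g. running make with KUBESTATUS_VERSION=0.7.1 (a hypothetical version, for illustration only) would bake a different kubestatus build into the image.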

ambassador/ambassador/ir/ir.py (+37 -3)

@@ -72,6 +72,7 @@ class IR:
     secret_handler: SecretHandler
     file_checker: Callable[[str], bool]
     resolvers: Dict[str, IRServiceResolver]
+    k8s_status_updates: Dict[str, Dict]

     def __init__(self, aconf: Config, secret_handler=None, file_checker=None) -> None:
         self.ambassador_id = Config.ambassador_id
@@ -126,6 +127,7 @@ class IR:
         self.listeners = []
         self.groups = {}
         self.resolvers = {}
+        self.k8s_status_updates = {}

         # OK, time to get this show on the road. First things first: set up the
         # Ambassador module.
@@ -452,7 +454,14 @@ class IR:
             final_knative_ingresses.update(knative_ingresses)

         for ci_name, ci in final_knative_ingresses.items():
-            self.logger.debug(f"Parsing ClusterIngress {ci_name}")
+            kind = ci['kind']
+
+            if kind == 'KnativeIngress':
+                kind = 'ingress.networking.internal.knative.dev'
+            else:
+                kind = kind.lower() + ".networking.internal.knative.dev"
+
+            self.logger.debug(f"Parsing {kind} {ci_name}")

             ci_rules = ci.get('rules', [])
             for rule_count, ci_rule in enumerate(ci_rules):
@@ -509,6 +518,28 @@ class IR:
                                    aconf.config['mappings'] = {}
                                aconf.config['mappings'][mapping_identifier] = ci_mapping

+                            # Remember that we need to update status on this resource.
+                            utcnow = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+                            self.k8s_status_updates[ci_name] = (kind, {
+                                "conditions": [
+                                    {
+                                        "lastTransitionTime": utcnow,
+                                        "status": "True",
+                                        "type": "LoadBalancerReady"
+                                    },
+                                    {
+                                        "lastTransitionTime": utcnow,
+                                        "status": "True",
+                                        "type": "NetworkConfigured"
+                                    },
+                                    {
+                                        "lastTransitionTime": utcnow,
+                                        "status": "True",
+                                        "type": "Ready"
+                                    }
+                                ]
+                            })
+
     def ordered_groups(self) -> Iterable[IRBaseMappingGroup]:
         return reversed(sorted(self.groups.values(), key=lambda x: x['group_weight']))

@@ -561,7 +592,8 @@ class IR:
             'filters': [ filt.as_dict() for filt in self.filters ],
             'groups': [ group.as_dict() for group in self.ordered_groups() ],
             'tls_contexts': [ context.as_dict() for context in self.tls_contexts.values() ],
-            'services': self.services
+            'services': self.services,
+            'k8s_status_updates': self.k8s_status_updates
         }

         if self.tracing:
@@ -725,9 +757,11 @@ class IR:
         od['endpoint_routing_envoy_maglev_count'] = endpoint_routing_envoy_maglev_count

         cluster_ingresses = self.aconf.get_config("ClusterIngress")
-
         od['cluster_ingress_count'] = len(cluster_ingresses.keys()) if cluster_ingresses else 0

+        knative_ingresses = self.aconf.get_config("KnativeIngress")
+        od['knative_ingress_count'] = len(knative_ingresses.keys()) if knative_ingresses else 0
+
         extauth = False
         extauth_proto: Optional[str] = None
         extauth_allow_body = False
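
To make the kind rewriting above concrete, here is a standalone sketch (not part of the commit; the function name is illustrative) of how the watched kinds normalize to the resource names kubestatus targets:

    # Ambassador tracks the Knative CRDs under the kinds "ClusterIngress" and
    # "KnativeIngress"; kubestatus wants fully-qualified resource names, and
    # Knative's own kind for the latter is plain "Ingress", hence the special case.
    def knative_status_kind(kind: str) -> str:
        if kind == 'KnativeIngress':
            return 'ingress.networking.internal.knative.dev'
        return kind.lower() + '.networking.internal.knative.dev'

    assert knative_status_kind('ClusterIngress') == 'clusteringress.networking.internal.knative.dev'
    assert knative_status_kind('KnativeIngress') == 'ingress.networking.internal.knative.dev'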

ambassador/ambassador_diag/diagd.py (+19 -0)

@@ -897,6 +897,25 @@ class AmbassadorEventWatcher(threading.Thread):
             self.logger.info("notifying PID %d ambex" % app.ambex_pid)
             os.kill(app.ambex_pid, signal.SIGHUP)

+        if app.ir.k8s_status_updates:
+            for name in app.ir.k8s_status_updates.keys():
+                kind, update = app.ir.k8s_status_updates[name]
+
+                self.logger.info(f"doing K8s status update for {kind} {name}...")
+
+                text = json.dumps(update)
+
+                with open(f'/tmp/kstat-{kind}-{name}', 'w') as out:
+                    out.write(text)
+
+                cmd = [ '/ambassador/kubestatus', kind, '-f', f'metadata.name={name}', '-u', '/dev/fd/0' ]
+
+                try:
+                    rc = subprocess.run(cmd, input=text.encode('utf-8'), timeout=5)
+                    self.logger.info(f'...update finished, rc {rc.returncode}')
+                except subprocess.TimeoutExpired as e:
+                    self.logger.error(f'...update timed out, {e}')
+
         self.logger.info("configuration updated from snapshot %s" % snapshot)
         self._respond(rqueue, 200, 'configuration updated from snapshot %s' % snapshot)
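For reference (a sketch, not part of the commit): the loop above is equivalent to running kubestatus by hand for each resource, feeding the replacement status on stdin. With a hypothetical resource name:

    import json, subprocess

    update = {"conditions": [{"type": "Ready", "status": "True"}]}  # abbreviated payload

    # kubestatus reads the new status from the file named by -u; /dev/fd/0
    # points that at the stdin pipe that subprocess.run() supplies via input=.
    cmd = ['/ambassador/kubestatus', 'clusteringress.networking.internal.knative.dev',
           '-f', 'metadata.name=helloworld-go', '-u', '/dev/fd/0']
    subprocess.run(cmd, input=json.dumps(update).encode('utf-8'), timeout=5)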

ambassador/watch_hook.py (+1 -0)

@@ -113,6 +113,7 @@ class FakeIR(IR):
         self.ratelimit = None
         self.saved_secrets = {}
         self.secret_info = {}
+        self.k8s_status_updates = {}

         self.ambassador_module = IRAmbassador(self, aconf)
 
