sync_test.go

package kubernetes

import (
	"fmt"
	"os"
	"sort"
	"strings"
	"testing"

	helmopfake "github.com/fluxcd/helm-operator/pkg/client/clientset/versioned/fake"
	"github.com/ghodss/yaml"
	"github.com/go-kit/kit/log"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	crdfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	dynamicfake "k8s.io/client-go/dynamic/fake"
	k8sclient "k8s.io/client-go/kubernetes"
	corefake "k8s.io/client-go/kubernetes/fake"
	k8s_testing "k8s.io/client-go/testing"

	"github.com/fluxcd/flux/cluster"
	kresource "github.com/fluxcd/flux/cluster/kubernetes/resource"
	fhrfake "github.com/fluxcd/flux/integrations/client/clientset/versioned/fake"
	"github.com/fluxcd/flux/resource"
	"github.com/fluxcd/flux/sync"
)

const (
	defaultTestNamespace = "unusual-default"
)
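
// fakeClients constructs the fake clientsets (core, Flux Helm
// resource, Helm operator, dynamic, CRD, and cached discovery) wired
// together as the sync machinery expects, and returns them along with
// a cleanup function that stops the discovery cache's background
// goroutine.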
func fakeClients() (ExtendedClient, func()) {
	scheme := runtime.NewScheme()

	// Set this to `true` to output a trace of the API actions called
	// while running the tests
	const debug = false

	getAndList := metav1.Verbs([]string{"get", "list"})

	// Adding these means the fake dynamic client will find them, and
	// be able to enumerate (list and get) the resources that we care
	// about
	apiResources := []*metav1.APIResourceList{
		{
			GroupVersion: "apps/v1",
			APIResources: []metav1.APIResource{
				{Name: "deployments", SingularName: "deployment", Namespaced: true, Kind: "Deployment", Verbs: getAndList},
			},
		},
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "namespaces", SingularName: "namespace", Namespaced: false, Kind: "Namespace", Verbs: getAndList},
			},
		},
	}

	coreClient := corefake.NewSimpleClientset(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: defaultTestNamespace}})
	fhrClient := fhrfake.NewSimpleClientset()
	hrClient := helmopfake.NewSimpleClientset()
	dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme)
	crdClient := crdfake.NewSimpleClientset()
	shutdown := make(chan struct{})
	discoveryClient := MakeCachedDiscovery(coreClient.Discovery(), crdClient, shutdown)

	// Assigned here, since this is _also_ used by the (fake)
	// discovery client therein, and ultimately by
	// getResourcesInStack since that uses the core clientset to
	// enumerate the namespaces.
	coreClient.Fake.Resources = apiResources

	if debug {
		for _, fake := range []*k8s_testing.Fake{&coreClient.Fake, &fhrClient.Fake, &hrClient.Fake, &dynamicClient.Fake} {
			fake.PrependReactor("*", "*", func(action k8s_testing.Action) (bool, runtime.Object, error) {
				gvr := action.GetResource()
				fmt.Printf("[DEBUG] action: %s ns:%s %s/%s %s\n", action.GetVerb(), action.GetNamespace(), gvr.Group, gvr.Version, gvr.Resource)
				return false, nil, nil
			})
		}
	}

	ec := ExtendedClient{
		coreClient:         coreClient,
		fluxHelmClient:     fhrClient,
		helmOperatorClient: hrClient,
		dynamicClient:      dynamicClient,
		discoveryClient:    discoveryClient,
	}

	return ec, func() { close(shutdown) }
}

// fakeApplier is an Applier that just forwards changeset operations
// to a dynamic client. It doesn't try to properly patch resources
// when that might be expected; it just overwrites them. But this is
// enough for checking whether sync operations succeeded and had the
// correct effect, which is either to "upsert", or delete, resources.
type fakeApplier struct {
	dynamicClient dynamic.Interface
	coreClient    k8sclient.Interface
	defaultNS     string
	commandRun    bool
}
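
// groupVersionResource derives the GroupVersionResource of an
// unstructured object by naively pluralising its kind; this is good
// enough for the kinds used in these tests.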
func groupVersionResource(res *unstructured.Unstructured) schema.GroupVersionResource {
	gvk := res.GetObjectKind().GroupVersionKind()
	return schema.GroupVersionResource{Group: gvk.Group, Version: gvk.Version, Resource: strings.ToLower(gvk.Kind) + "s"}
}
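
// apply implements the fake side of the Applier interface: it carries
// out each "delete", then each "apply", in the changeset against the
// fake clients, collecting any per-resource errors. It uses a pointer
// receiver so that setting commandRun is visible to the caller.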
func (a *fakeApplier) apply(_ log.Logger, cs changeSet, errored map[resource.ID]error) cluster.SyncError {
	var errs []cluster.ResourceError

	operate := func(obj applyObject, cmd string) {
		a.commandRun = true
		var unstruct map[string]interface{}
		if err := yaml.Unmarshal(obj.Payload, &unstruct); err != nil {
			errs = append(errs, cluster.ResourceError{obj.ResourceID, obj.Source, err})
			return
		}
		res := &unstructured.Unstructured{Object: unstruct}

		// This is a special case trapdoor, for testing failure to
		// apply a resource.
		if errStr := res.GetAnnotations()["error"]; errStr != "" {
			errs = append(errs, cluster.ResourceError{obj.ResourceID, obj.Source, fmt.Errorf("%s", errStr)})
			return
		}

		gvr := groupVersionResource(res)
		c := a.dynamicClient.Resource(gvr)
		// This is an approximation to what `kubectl` does in filling
		// in the fallback namespace (from config). In the case of
		// non-namespaced entities, it will be ignored by the fake
		// client (FIXME: make sure of this).
		apiRes := findAPIResource(gvr, a.coreClient.Discovery())
		if apiRes == nil {
			panic("no APIResource found for " + gvr.String())
		}

		var dc dynamic.ResourceInterface = c
		ns := res.GetNamespace()
		if apiRes.Namespaced {
			if ns == "" {
				ns = a.defaultNS
				res.SetNamespace(ns)
			}
			dc = c.Namespace(ns)
		}
		name := res.GetName()

		if cmd == "apply" {
			_, err := dc.Get(name, metav1.GetOptions{})
			switch {
			case errors.IsNotFound(err):
				_, err = dc.Create(res, metav1.CreateOptions{})
			case err == nil:
				_, err = dc.Update(res, metav1.UpdateOptions{})
			}
			if err != nil {
				errs = append(errs, cluster.ResourceError{obj.ResourceID, obj.Source, err})
				return
			}
			if res.GetKind() == "Namespace" {
				// We also create namespaces in the core fake client since the dynamic client
				// and core clients don't share resources
				var ns corev1.Namespace
				if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstruct, &ns); err != nil {
					errs = append(errs, cluster.ResourceError{obj.ResourceID, obj.Source, err})
					return
				}
				_, err := a.coreClient.CoreV1().Namespaces().Get(ns.Name, metav1.GetOptions{})
				switch {
				case errors.IsNotFound(err):
					_, err = a.coreClient.CoreV1().Namespaces().Create(&ns)
				case err == nil:
					_, err = a.coreClient.CoreV1().Namespaces().Update(&ns)
				}
				if err != nil {
					errs = append(errs, cluster.ResourceError{obj.ResourceID, obj.Source, err})
					return
				}
			}
		} else if cmd == "delete" {
			if err := dc.Delete(name, &metav1.DeleteOptions{}); err != nil {
				errs = append(errs, cluster.ResourceError{obj.ResourceID, obj.Source, err})
				return
			}
			if res.GetKind() == "Namespace" {
				// We also delete namespaces in the core fake client since the dynamic client
				// and core clients don't share resources
				if err := a.coreClient.CoreV1().Namespaces().Delete(res.GetName(), &metav1.DeleteOptions{}); err != nil {
					errs = append(errs, cluster.ResourceError{obj.ResourceID, obj.Source, err})
					return
				}
			}
		} else {
			panic("unknown action: " + cmd)
		}
	}

	for _, obj := range cs.objs["delete"] {
		operate(obj, "delete")
	}
	for _, obj := range cs.objs["apply"] {
		operate(obj, "apply")
	}
	if len(errs) == 0 {
		return nil
	}
	return errs
}
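
// findAPIResource looks up the APIResource for a given
// GroupVersionResource via discovery, returning nil if the
// group/version or the resource itself is unknown.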
func findAPIResource(gvr schema.GroupVersionResource, disco discovery.DiscoveryInterface) *metav1.APIResource {
	groupVersion := gvr.Version
	if gvr.Group != "" {
		groupVersion = gvr.Group + "/" + groupVersion
	}
	reses, err := disco.ServerResourcesForGroupVersion(groupVersion)
	if err != nil {
		return nil
	}
	for _, res := range reses.APIResources {
		if res.Name == gvr.Resource {
			return &res
		}
	}
	return nil
}

// ---
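
// setup constructs a Cluster backed by the fake clients and fake
// applier above, ready for exercising Sync in the tests that follow.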
func setup(t *testing.T) (*Cluster, *fakeApplier, func()) {
	clients, cancel := fakeClients()
	applier := &fakeApplier{dynamicClient: clients.dynamicClient, coreClient: clients.coreClient, defaultNS: defaultTestNamespace}
	kube := &Cluster{
		applier: applier,
		client:  clients,
		logger:  log.NewLogfmtLogger(os.Stdout),
	}
	return kube, applier, cancel
}
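
// TestSyncNop checks that syncing an empty set of resources runs no
// commands against the cluster.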
func TestSyncNop(t *testing.T) {
	kube, mock, cancel := setup(t)
	defer cancel()
	if err := kube.Sync(cluster.SyncSet{}); err != nil {
		t.Errorf("%#v", err)
	}
	if mock.commandRun {
		t.Error("expected no commands run")
	}
}

func TestSyncTolerateEmptyGroupVersion(t *testing.T) {
	kube, _, cancel := setup(t)
	defer cancel()

	// Add a GroupVersion without API Resources
	fakeClient := kube.client.coreClient.(*corefake.Clientset)
	fakeClient.Resources = append(fakeClient.Resources, &metav1.APIResourceList{GroupVersion: "foo.bar/v1"})

	// We should tolerate the error caused in the cache due to the
	// GroupVersion being empty
	err := kube.Sync(cluster.SyncSet{})
	assert.NoError(t, err)

	// No errors the second time either
	err = kube.Sync(cluster.SyncSet{})
	assert.NoError(t, err)
}
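
// failingDiscoveryClient wraps a DiscoveryInterface so that asking it
// for the resources of any group/version fails with a 503.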
type failingDiscoveryClient struct {
	discovery.DiscoveryInterface
}

func (d *failingDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
	return nil, errors.NewServiceUnavailable("")
}

func TestSyncTolerateMetricsErrors(t *testing.T) {
	kube, _, cancel := setup(t)

	// Replace the discovery client by one returning errors when asking for resources
	cancel()
	crdClient := crdfake.NewSimpleClientset()
	shutdown := make(chan struct{})
	defer close(shutdown)
	newDiscoveryClient := &failingDiscoveryClient{kube.client.coreClient.Discovery()}
	kube.client.discoveryClient = MakeCachedDiscovery(newDiscoveryClient, crdClient, shutdown)

	// Check that syncing results in an error for groups other than metrics
	fakeClient := kube.client.coreClient.(*corefake.Clientset)
	fakeClient.Resources = []*metav1.APIResourceList{{GroupVersion: "foo.bar/v1"}}
	err := kube.Sync(cluster.SyncSet{})
	assert.Error(t, err)

	// Check that syncing doesn't result in an error for a metrics group
	kube.client.discoveryClient.(*cachedDiscovery).CachedDiscoveryInterface.Invalidate()
	fakeClient.Resources = []*metav1.APIResourceList{{GroupVersion: "custom.metrics.k8s.io/v1"}}
	err = kube.Sync(cluster.SyncSet{})
	assert.NoError(t, err)
}
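
// TestSync exercises the sync path end to end: parsing manifests,
// applying them, garbage-collecting resources no longer mentioned,
// and honouring the flux.weave.works/ignore annotation.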
func TestSync(t *testing.T) {
	const ns1 = `---
apiVersion: v1
kind: Namespace
metadata:
  name: foobar
`
	const defs1 = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep1
  namespace: foobar
`
	const defs2 = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep2
  namespace: foobar
`
	const ns3 = `---
apiVersion: v1
kind: Namespace
metadata:
  name: other
`
	const defs3 = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep3
  namespace: other
`

	// checkSame is a check that a result returned from the cluster is
	// the same as expected. Labels and annotations may be altered by
	// the sync process; we'll look at the "spec" field as an
	// indication of whether the resources are equivalent or not.
	checkSame := func(t *testing.T, expected []byte, actual *unstructured.Unstructured) {
		var expectedSpec struct{ Spec map[string]interface{} }
		if err := yaml.Unmarshal(expected, &expectedSpec); err != nil {
			t.Error(err)
			return
		}
		if expectedSpec.Spec != nil {
			assert.Equal(t, expectedSpec.Spec, actual.Object["spec"])
		}
	}

	test := func(t *testing.T, kube *Cluster, defs, expectedAfterSync string, expectErrors bool) {
		saved := getDefaultNamespace
		getDefaultNamespace = func() (string, error) { return defaultTestNamespace, nil }
		defer func() { getDefaultNamespace = saved }()
		namespacer, err := NewNamespacer(kube.client.coreClient.Discovery())
		if err != nil {
			t.Fatal(err)
		}
		manifests := NewManifests(namespacer, log.NewLogfmtLogger(os.Stdout))

		resources0, err := kresource.ParseMultidoc([]byte(defs), "before")
		if err != nil {
			t.Fatal(err)
		}
		// Needed to get from KubeManifest to resource.Resource
		resources, err := manifests.setEffectiveNamespaces(resources0)
		if err != nil {
			t.Fatal(err)
		}
		resourcesByID := map[string]resource.Resource{}
		for _, r := range resources {
			resourcesByID[r.ResourceID().String()] = r
		}
		err = sync.Sync("testset", resourcesByID, kube)
		if !expectErrors && err != nil {
			t.Error(err)
		}
		expected, err := kresource.ParseMultidoc([]byte(expectedAfterSync), "after")
		if err != nil {
			panic(err)
		}

		// Now check that the resources were created
		actual, err := kube.getAllowedGCMarkedResourcesInSyncSet("testset")
		if err != nil {
			t.Fatal(err)
		}

		for id := range actual {
			if _, ok := expected[id]; !ok {
				t.Errorf("resource present after sync but not in resources applied: %q (present: %v)", id, actual)
				if j, err := yaml.Marshal(actual[id].obj); err == nil {
					println(string(j))
				}
				continue
			}
			checkSame(t, expected[id].Bytes(), actual[id].obj)
		}
		for id := range expected {
			if _, ok := actual[id]; !ok {
				t.Errorf("resource supposed to be synced but not present: %q (present: %v)", id, actual)
			}
			// no need to compare values, since we already considered
			// the intersection of actual and expected above.
		}
	}

	t.Run("sync adds and GCs resources", func(t *testing.T) {
		kube, _, cancel := setup(t)
		defer cancel()

		// without GC on, resources persist if they are not mentioned in subsequent syncs.
		test(t, kube, "", "", false)
		test(t, kube, ns1+defs1, ns1+defs1, false)
		test(t, kube, ns1+defs1+defs2, ns1+defs1+defs2, false)
		test(t, kube, ns3+defs3, ns1+defs1+defs2+ns3+defs3, false)

		// Now with GC switched on. That means if we don't include a
		// resource in a sync, it should be deleted.
		kube.GC = true
		test(t, kube, ns1+defs2+ns3+defs3, ns1+defs2+ns3+defs3, false)
		test(t, kube, ns1+defs1+defs2, ns1+defs1+defs2, false)
		test(t, kube, "", "", false)
	})

	t.Run("sync adds and GCs dry run", func(t *testing.T) {
		kube, _, cancel := setup(t)
		defer cancel()

		// without GC on, resources persist if they are not mentioned in subsequent syncs.
		test(t, kube, "", "", false)
		test(t, kube, ns1+defs1, ns1+defs1, false)
		test(t, kube, ns1+defs1+defs2, ns1+defs1+defs2, false)
		test(t, kube, ns3+defs3, ns1+defs1+defs2+ns3+defs3, false)

		// With GC dry-run on, the garbage collection routine runs, but
		// only logs what it would collect, without deleting any resources.
		kube.DryGC = true
		test(t, kube, ns1+defs2+ns3+defs3, ns1+defs1+defs2+ns3+defs3, false)
		test(t, kube, ns1+defs1+defs2, ns1+defs1+defs2+ns3+defs3, false)
		test(t, kube, "", ns1+defs1+defs2+ns3+defs3, false)
	})

	t.Run("sync won't incorrectly delete non-namespaced resources", func(t *testing.T) {
		kube, _, cancel := setup(t)
		defer cancel()
		kube.GC = true

		const nsDef = `
apiVersion: v1
kind: Namespace
metadata:
  name: bar-ns
`
		test(t, kube, nsDef, nsDef, false)
	})

	t.Run("sync won't delete resources that got the fallback namespace when created", func(t *testing.T) {
		// NB: this tests the fake client implementation to some
		// extent as well. It relies on it to reflect the kubectl
		// behaviour of giving things that need a namespace some
		// fallback (this would come from kubeconfig usually); and,
		// for things that _don't_ have a namespace, to have it
		// stripped out.
		kube, _, cancel := setup(t)
		defer cancel()
		kube.GC = true
		const withoutNS = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: depFallbackNS
`
		const withNS = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: depFallbackNS
  namespace: ` + defaultTestNamespace + `
`
		test(t, kube, withoutNS, withNS, false)
	})

	t.Run("sync won't delete resources whose garbage collection mark was copied to", func(t *testing.T) {
		kube, _, cancel := setup(t)
		defer cancel()
		kube.GC = true

		depName := "dep"
		depNS := "foobar"
		dep := fmt.Sprintf(`---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: %s
  namespace: %s
`, depName, depNS)

		// Add dep to the cluster through syncing
		test(t, kube, ns1+dep, ns1+dep, false)

		// Add a copy of dep (including the GCmark label) with a different name directly to the cluster
		gvr := schema.GroupVersionResource{
			Group:    "apps",
			Version:  "v1",
			Resource: "deployments",
		}
		client := kube.client.dynamicClient.Resource(gvr).Namespace(depNS)
		depActual, err := client.Get(depName, metav1.GetOptions{})
		assert.NoError(t, err)
		depCopy := depActual.DeepCopy()
		depCopyName := depName + "copy"
		depCopy.SetName(depCopyName)
		depCopyActual, err := client.Create(depCopy, metav1.CreateOptions{})
		assert.NoError(t, err)

		// Check that both dep and its copy have the same GCmark label
		assert.Equal(t, depActual.GetName()+"copy", depCopyActual.GetName())
		assert.NotEmpty(t, depActual.GetLabels()[gcMarkLabel])
		assert.Equal(t, depActual.GetLabels()[gcMarkLabel], depCopyActual.GetLabels()[gcMarkLabel])

		// Remove dep from the cluster through syncing
		test(t, kube, "", "", false)

		// Check that dep is removed from the cluster but its copy isn't, due to having a different name
		_, err = client.Get(depName, metav1.GetOptions{})
		assert.Error(t, err)
		_, err = client.Get(depCopyName, metav1.GetOptions{})
		assert.NoError(t, err)
	})

	t.Run("sync won't delete if apply failed", func(t *testing.T) {
		kube, _, cancel := setup(t)
		defer cancel()
		kube.GC = true

		const defs1invalid = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: foobar
  name: dep1
  annotations:
    error: fail to apply this
`
		test(t, kube, ns1+defs1, ns1+defs1, false)
		test(t, kube, ns1+defs1invalid, ns1+defs1invalid, true)
	})

	t.Run("sync doesn't apply or delete manifests marked with ignore", func(t *testing.T) {
		kube, _, cancel := setup(t)
		defer cancel()
		kube.GC = true

		const dep1 = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: foobar
  name: dep1
spec:
  metadata:
    labels: {app: foo}
`
		const dep2 = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: foobar
  name: dep2
  annotations: {flux.weave.works/ignore: "true"}
`
		// dep1 is created, but dep2 is ignored
		test(t, kube, ns1+dep1+dep2, ns1+dep1, false)

		const dep1ignored = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: foobar
  name: dep1
  annotations:
    flux.weave.works/ignore: "true"
spec:
  metadata:
    labels: {app: bar}
`
		// dep1 is not updated, but neither is it deleted
		test(t, kube, ns1+dep1ignored+dep2, ns1+dep1, false)
	})

	t.Run("sync doesn't update a cluster resource marked with ignore", func(t *testing.T) {
		const dep1 = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: foobar
  name: dep1
spec:
  metadata:
    labels:
      app: original
`
		kube, _, cancel := setup(t)
		defer cancel()
		// This just checks the starting assumption: dep1 exists in the cluster
		test(t, kube, ns1+dep1, ns1+dep1, false)

		// Now we'll mark it as ignored _in the cluster_ (i.e., the
		// equivalent of `kubectl annotate`)
		dc := kube.client.dynamicClient
		rc := dc.Resource(schema.GroupVersionResource{
			Group:    "apps",
			Version:  "v1",
			Resource: "deployments",
		})
		res, err := rc.Namespace("foobar").Get("dep1", metav1.GetOptions{})
		if err != nil {
			t.Fatal(err)
		}
		annots := res.GetAnnotations()
		annots["flux.weave.works/ignore"] = "true"
		res.SetAnnotations(annots)
		if _, err = rc.Namespace("foobar").Update(res, metav1.UpdateOptions{}); err != nil {
			t.Fatal(err)
		}

		const mod1 = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: foobar
  name: dep1
spec:
  metadata:
    labels:
      app: modified
`
		// Check that dep1, which is marked ignore in the cluster, is
		// neither updated nor deleted
		test(t, kube, ns1+mod1, ns1+dep1, false)
	})

	t.Run("sync doesn't update or delete a pre-existing resource marked with ignore", func(t *testing.T) {
		kube, _, cancel := setup(t)
		defer cancel()

		const existing = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: foobar
  name: dep1
  annotations: {flux.weave.works/ignore: "true"}
spec:
  metadata:
    labels: {foo: original}
`
		var dep1obj map[string]interface{}
		err := yaml.Unmarshal([]byte(existing), &dep1obj)
		assert.NoError(t, err)
		dep1res := &unstructured.Unstructured{Object: dep1obj}
		gvr := groupVersionResource(dep1res)
		var ns1obj corev1.Namespace
		err = yaml.Unmarshal([]byte(ns1), &ns1obj)
		assert.NoError(t, err)
		// Put the pre-existing resource in the cluster
		_, err = kube.client.coreClient.CoreV1().Namespaces().Create(&ns1obj)
		assert.NoError(t, err)
		dc := kube.client.dynamicClient.Resource(gvr).Namespace(dep1res.GetNamespace())
		_, err = dc.Create(dep1res, metav1.CreateOptions{})
		assert.NoError(t, err)

		// Check that our resource-getting also sees the pre-existing resource
		resources, err := kube.getAllowedResourcesBySelector("")
		assert.NoError(t, err)
		assert.Contains(t, resources, "foobar:deployment/dep1")

		// NB test checks the _synced_ resources, so this just asserts
		// the precondition, that nothing is synced
		test(t, kube, "", "", false)

		// .. but, our resource is still there.
		r, err := dc.Get(dep1res.GetName(), metav1.GetOptions{})
		assert.NoError(t, err)
		assert.NotNil(t, r)

		const update = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: foobar
  name: dep1
spec:
  metadata:
    labels: {foo: modified}
`
		// Check that it's not been synced (i.e., still not included in synced resources)
		test(t, kube, update, "", false)

		// Check that it still exists, as created
		r, err = dc.Get(dep1res.GetName(), metav1.GetOptions{})
		assert.NoError(t, err)
		assert.NotNil(t, r)
		checkSame(t, []byte(existing), r)
	})
}

// ----

// TestApplyOrder checks that applyOrder works as expected.
func TestApplyOrder(t *testing.T) {
	objs := []applyObject{
		{ResourceID: resource.MakeID("test", "Deployment", "deploy")},
		{ResourceID: resource.MakeID("test", "Secret", "secret")},
		{ResourceID: resource.MakeID("", "Namespace", "namespace")},
	}
	sort.Sort(applyOrder(objs))
	for i, name := range []string{"namespace", "secret", "deploy"} {
		_, _, objName := objs[i].ResourceID.Components()
		if objName != name {
			t.Errorf("Expected %q at position %d, got %q", name, i, objName)
		}
	}
}