GitOps for k8s

daemon.go 27KB

package daemon

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/pkg/errors"

	"github.com/fluxcd/flux/api"
	"github.com/fluxcd/flux/api/v10"
	"github.com/fluxcd/flux/api/v11"
	"github.com/fluxcd/flux/api/v6"
	"github.com/fluxcd/flux/api/v9"
	"github.com/fluxcd/flux/cluster"
	"github.com/fluxcd/flux/event"
	"github.com/fluxcd/flux/git"
	"github.com/fluxcd/flux/guid"
	"github.com/fluxcd/flux/image"
	"github.com/fluxcd/flux/job"
	"github.com/fluxcd/flux/manifests"
	"github.com/fluxcd/flux/policy"
	"github.com/fluxcd/flux/registry"
	"github.com/fluxcd/flux/release"
	"github.com/fluxcd/flux/resource"
	"github.com/fluxcd/flux/sync"
	"github.com/fluxcd/flux/update"
)

const (
	// This is set to be in sympathy with the request / RPC timeout (i.e., empirically)
	defaultHandlerTimeout = 10 * time.Second
	// A job can take an arbitrary amount of time but we want to have
	// a (generous) threshold for considering a job stuck and
	// abandoning it
	defaultJobTimeout = 60 * time.Second
)

// Daemon is the fully-functional state of a daemon (compare to
// `NotReadyDaemon`).
type Daemon struct {
	V string
	Cluster cluster.Cluster
	Manifests manifests.Manifests
	Registry registry.Registry
	ImageRefresh chan image.Name
	Repo *git.Repo
	GitConfig git.Config
	Jobs *job.Queue
	JobStatusCache *job.StatusCache
	EventWriter event.EventWriter
	Logger log.Logger
	ManifestGenerationEnabled bool
	// bookkeeping
	*LoopVars
}

// Invariant.
var _ api.Server = &Daemon{}

func (d *Daemon) Version(ctx context.Context) (string, error) {
	return d.V, nil
}

func (d *Daemon) Ping(ctx context.Context) error {
	return d.Cluster.Ping()
}

func (d *Daemon) Export(ctx context.Context) ([]byte, error) {
	return d.Cluster.Export(ctx)
}

type repo interface {
	Dir() string
}

func (d *Daemon) getManifestStore(r repo) (manifests.Store, error) {
	absPaths := git.MakeAbsolutePaths(r, d.GitConfig.Paths)
	if d.ManifestGenerationEnabled {
		return manifests.NewConfigAware(r.Dir(), absPaths, d.Manifests)
	}
	return manifests.NewRawFiles(r.Dir(), absPaths, d.Manifests), nil
}

func (d *Daemon) getResources(ctx context.Context) (map[string]resource.Resource, v6.ReadOnlyReason, error) {
	var resources map[string]resource.Resource
	var globalReadOnly v6.ReadOnlyReason
	err := d.WithReadonlyClone(ctx, func(checkout *git.Export) error {
		cm, err := d.getManifestStore(checkout)
		if err != nil {
			return err
		}
		resources, err = cm.GetAllResourcesByID(ctx)
		return err
	})

	// The reason something is missing from the map differs depending
	// on the state of the git repo.
	_, notReady := err.(git.NotReadyError)
	switch {
	case notReady:
		globalReadOnly = v6.ReadOnlyNotReady
	case err == git.ErrNoConfig:
		globalReadOnly = v6.ReadOnlyNoRepo
	case err != nil:
		return nil, globalReadOnly, manifestLoadError(err)
	default:
		globalReadOnly = v6.ReadOnlyMissing
	}

	return resources, globalReadOnly, nil
}

func (d *Daemon) ListServices(ctx context.Context, namespace string) ([]v6.ControllerStatus, error) {
	return d.ListServicesWithOptions(ctx, v11.ListServicesOptions{Namespace: namespace})
}

func (d *Daemon) ListServicesWithOptions(ctx context.Context, opts v11.ListServicesOptions) ([]v6.ControllerStatus, error) {
	if opts.Namespace != "" && len(opts.Services) > 0 {
		return nil, errors.New("cannot filter by 'namespace' and 'workloads' at the same time")
	}

	var clusterWorkloads []cluster.Workload
	var err error
	if len(opts.Services) > 0 {
		clusterWorkloads, err = d.Cluster.SomeWorkloads(ctx, opts.Services)
	} else {
		clusterWorkloads, err = d.Cluster.AllWorkloads(ctx, opts.Namespace)
	}
	if err != nil {
		return nil, errors.Wrap(err, "getting workloads from cluster")
	}

	resources, missingReason, err := d.getResources(ctx)
	if err != nil {
		return nil, err
	}

	var res []v6.ControllerStatus
	for _, workload := range clusterWorkloads {
		readOnly := v6.ReadOnlyOK
		repoIsReadonly := d.Repo.Readonly()

		var policies policy.Set
		if resource, ok := resources[workload.ID.String()]; ok {
			policies = resource.Policies()
		}
		switch {
		case policies == nil:
			readOnly = missingReason
		case repoIsReadonly:
			readOnly = v6.ReadOnlyROMode
		case workload.IsSystem:
			readOnly = v6.ReadOnlySystem
		}
		var syncError string
		if workload.SyncError != nil {
			syncError = workload.SyncError.Error()
		}
		res = append(res, v6.ControllerStatus{
			ID: workload.ID,
			Containers: containers2containers(workload.ContainersOrNil()),
			ReadOnly: readOnly,
			Status: workload.Status,
			Rollout: workload.Rollout,
			SyncError: syncError,
			Antecedent: workload.Antecedent,
			Labels: workload.Labels,
			Automated: policies.Has(policy.Automated),
			Locked: policies.Has(policy.Locked),
			Ignore: policies.Has(policy.Ignore),
			Policies: policies.ToStringMap(),
		})
	}

	return res, nil
}
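
// exampleListServicesInNamespace is an illustrative sketch, not part of the
// daemon's behaviour and not called anywhere: it shows one of the two
// mutually exclusive ways to filter ListServicesWithOptions above, by
// namespace. The namespace value "default" is a hypothetical input; setting
// Services at the same time would be rejected by the guard at the top of the
// function.
func exampleListServicesInNamespace(ctx context.Context, d *Daemon) ([]v6.ControllerStatus, error) {
	return d.ListServicesWithOptions(ctx, v11.ListServicesOptions{Namespace: "default"})
}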

type clusterContainers []cluster.Workload

func (cs clusterContainers) Len() int {
	return len(cs)
}

func (cs clusterContainers) Containers(i int) []resource.Container {
	return cs[i].ContainersOrNil()
}

// ListImages - deprecated from v10; lists the images available for a set of workloads
func (d *Daemon) ListImages(ctx context.Context, spec update.ResourceSpec) ([]v6.ImageStatus, error) {
	return d.ListImagesWithOptions(ctx, v10.ListImagesOptions{Spec: spec})
}

// ListImagesWithOptions lists the images available for a set of workloads
func (d *Daemon) ListImagesWithOptions(ctx context.Context, opts v10.ListImagesOptions) ([]v6.ImageStatus, error) {
	if opts.Namespace != "" && opts.Spec != update.ResourceSpecAll {
		return nil, errors.New("cannot filter by 'namespace' and 'workload' at the same time")
	}

	var workloads []cluster.Workload
	var err error
	if opts.Spec != update.ResourceSpecAll {
		id, err := opts.Spec.AsID()
		if err != nil {
			return nil, errors.Wrap(err, "treating workload spec as ID")
		}
		workloads, err = d.Cluster.SomeWorkloads(ctx, []resource.ID{id})
		if err != nil {
			return nil, errors.Wrap(err, "getting some workloads")
		}
	} else {
		workloads, err = d.Cluster.AllWorkloads(ctx, opts.Namespace)
		if err != nil {
			return nil, errors.Wrap(err, "getting all workloads")
		}
	}

	resources, _, err := d.getResources(ctx)
	if err != nil {
		return nil, err
	}

	imageRepos, err := update.FetchImageRepos(d.Registry, clusterContainers(workloads), d.Logger)
	if err != nil {
		return nil, errors.Wrap(err, "getting images for workloads")
	}

	var res []v6.ImageStatus
	for _, workload := range workloads {
		workloadContainers, err := getWorkloadContainers(workload, imageRepos, resources[workload.ID.String()], opts.OverrideContainerFields)
		if err != nil {
			return nil, err
		}
		res = append(res, v6.ImageStatus{
			ID: workload.ID,
			Containers: workloadContainers,
		})
	}

	return res, nil
}
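
// exampleListAllImages is an illustrative sketch, not called anywhere: it
// shows how a caller of the API above might list image metadata for every
// workload by passing the catch-all resource spec. update.ResourceSpecAll
// selects every workload; a Namespace filter may be combined with it, but not
// with a specific workload spec, per the guard at the top of
// ListImagesWithOptions.
func exampleListAllImages(ctx context.Context, d *Daemon) ([]v6.ImageStatus, error) {
	return d.ListImagesWithOptions(ctx, v10.ListImagesOptions{Spec: update.ResourceSpecAll})
}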

// jobFunc is a type for procedures that the daemon will execute in a job
type jobFunc func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error)

// updateFunc is a type for procedures that operate on a git checkout, to be run in a job
type updateFunc func(ctx context.Context, jobID job.ID, working *git.Checkout, logger log.Logger) (job.Result, error)

// makeJobFromUpdate turns an updateFunc into a jobFunc that will run
// the update with a fresh clone, and log the result as an event.
func (d *Daemon) makeJobFromUpdate(update updateFunc) jobFunc {
	return func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error) {
		var result job.Result
		err := d.WithWorkingClone(ctx, func(working *git.Checkout) error {
			var err error
			if err = verifyWorkingRepo(ctx, d.Repo, working, d.SyncState); d.GitVerifySignatures && err != nil {
				return err
			}
			result, err = update(ctx, jobID, working, logger)
			if err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return result, err
		}
		return result, nil
	}
}

// executeJob runs a job func and keeps track of its status, so the
// daemon can report it when asked.
func (d *Daemon) executeJob(id job.ID, do jobFunc, logger log.Logger) (job.Result, error) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultJobTimeout)
	defer cancel()
	d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusRunning})
	result, err := do(ctx, id, logger)
	if err != nil {
		d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusFailed, Err: err.Error(), Result: result})
		return result, err
	}
	d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusSucceeded, Result: result})
	return result, nil
}

// makeLoggingJobFunc takes a jobFunc and returns a jobFunc that will log
// a commit event with the result.
func (d *Daemon) makeLoggingJobFunc(f jobFunc) jobFunc {
	return func(ctx context.Context, id job.ID, logger log.Logger) (job.Result, error) {
		started := time.Now().UTC()
		result, err := f(ctx, id, logger)
		if err != nil {
			return result, err
		}
		logger.Log("revision", result.Revision)
		if result.Revision != "" {
			var workloadIDs []resource.ID
			for id, result := range result.Result {
				if result.Status == update.ReleaseStatusSuccess {
					workloadIDs = append(workloadIDs, id)
				}
			}
			metadata := &event.CommitEventMetadata{
				Revision: result.Revision,
				Spec: result.Spec,
				Result: result.Result,
			}
			return result, d.LogEvent(event.Event{
				ServiceIDs: workloadIDs,
				Type: event.EventCommit,
				StartedAt: started,
				EndedAt: started,
				LogLevel: event.LogLevelInfo,
				Metadata: metadata,
			})
		}
		return result, nil
	}
}

// queueJob queues a job func to be executed.
func (d *Daemon) queueJob(do jobFunc) job.ID {
	id := job.ID(guid.New())
	enqueuedAt := time.Now()
	d.Jobs.Enqueue(&job.Job{
		ID: id,
		Do: func(logger log.Logger) error {
			queueDuration.Observe(time.Since(enqueuedAt).Seconds())
			_, err := d.executeJob(id, do, logger)
			if err != nil {
				return err
			}
			return nil
		},
	})
	queueLength.Set(float64(d.Jobs.Len()))
	d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusQueued})
	return id
}
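
// exampleJobLifecycle is an illustrative sketch, not called anywhere in the
// daemon: it shows the lifecycle implied by the functions above. A jobFunc is
// queued with queueJob (status Queued), picked up by the queue worker, which
// runs it through executeJob (status Running, then Succeeded or Failed), and
// its progress can be observed via JobStatus. The trivial jobFunc body is an
// assumption, for illustration only.
func exampleJobLifecycle(ctx context.Context, d *Daemon) (job.Status, error) {
	id := d.queueJob(func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error) {
		// A real jobFunc would operate on a git checkout; this one just
		// returns an empty result.
		return job.Result{}, nil
	})
	// Immediately after queueing, the cached status is job.StatusQueued;
	// callers poll JobStatus until it reports success or failure.
	return d.JobStatus(ctx, id)
}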

// UpdateManifests applies the desired changes to the config files.
func (d *Daemon) UpdateManifests(ctx context.Context, spec update.Spec) (job.ID, error) {
	var id job.ID
	if spec.Type == "" {
		return id, errors.New("no type in update spec")
	}
	switch s := spec.Spec.(type) {
	case release.Changes:
		if s.ReleaseKind() == update.ReleaseKindPlan {
			id := job.ID(guid.New())
			_, err := d.executeJob(id, d.makeJobFromUpdate(d.release(spec, s)), d.Logger)
			return id, err
		}
		return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.release(spec, s)))), nil
	case resource.PolicyUpdates:
		return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.updatePolicies(spec, s)))), nil
	case update.ManualSync:
		return d.queueJob(d.sync()), nil
	default:
		return id, fmt.Errorf(`unknown update type "%s"`, spec.Type)
	}
}
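
// exampleQueueManualSync is an illustrative sketch, not called anywhere: it
// shows the shape of a spec that UpdateManifests routes to the sync() job
// below. The Type value "sync" is a hypothetical placeholder; UpdateManifests
// only requires Type to be non-empty and dispatches on the dynamic type of
// Spec, which here is update.ManualSync.
func exampleQueueManualSync(ctx context.Context, d *Daemon) (job.ID, error) {
	var manual update.ManualSync
	return d.UpdateManifests(ctx, update.Spec{
		Type: "sync", // placeholder; any non-empty value passes the guard above
		Spec: manual,
	})
}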

func (d *Daemon) sync() jobFunc {
	return func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error) {
		var result job.Result
		ctx, cancel := context.WithTimeout(ctx, defaultJobTimeout)
		defer cancel()
		err := d.Repo.Refresh(ctx)
		if err != nil {
			return result, err
		}
		head, err := d.Repo.BranchHead(ctx)
		if err != nil {
			return result, err
		}
		if d.GitVerifySignatures {
			var latestValidRev string
			if latestValidRev, _, err = latestValidRevision(ctx, d.Repo, d.SyncState); err != nil {
				return result, err
			} else if head != latestValidRev {
				result.Revision = latestValidRev
				return result, fmt.Errorf(
					"The branch HEAD in the git repo is not verified, and fluxd is unable to sync to it. The last verified commit was %.8s. HEAD is %.8s.",
					latestValidRev,
					head,
				)
			}
		}
		result.Revision = head
		return result, err
	}
}

func (d *Daemon) updatePolicies(spec update.Spec, updates resource.PolicyUpdates) updateFunc {
	return func(ctx context.Context, jobID job.ID, working *git.Checkout, logger log.Logger) (job.Result, error) {
		// For each update
		var workloadIDs []resource.ID
		result := job.Result{
			Spec: &spec,
			Result: update.Result{},
		}

		// A shortcut to make things more responsive: if anything
		// was (probably) set to automated, we will ask for an
		// automation run straight away.
		var anythingAutomated bool

		for workloadID, u := range updates {
			if d.Cluster.IsAllowedResource(workloadID) {
				result.Result[workloadID] = update.WorkloadResult{
					Status: update.ReleaseStatusSkipped,
				}
			}
			if policy.Set(u.Add).Has(policy.Automated) {
				anythingAutomated = true
			}
			cm, err := d.getManifestStore(working)
			if err != nil {
				return result, err
			}
			updated, err := cm.UpdateWorkloadPolicies(ctx, workloadID, u)
			if err != nil {
				result.Result[workloadID] = update.WorkloadResult{
					Status: update.ReleaseStatusFailed,
					Error: err.Error(),
				}
				switch err := err.(type) {
				case manifests.StoreError:
					result.Result[workloadID] = update.WorkloadResult{
						Status: update.ReleaseStatusFailed,
						Error: err.Error(),
					}
				default:
					return result, err
				}
			}
			if !updated {
				result.Result[workloadID] = update.WorkloadResult{
					Status: update.ReleaseStatusSkipped,
				}
			} else {
				workloadIDs = append(workloadIDs, workloadID)
				result.Result[workloadID] = update.WorkloadResult{
					Status: update.ReleaseStatusSuccess,
				}
			}
		}
		if len(workloadIDs) == 0 {
			return result, nil
		}

		commitAuthor := ""
		if d.GitConfig.SetAuthor {
			commitAuthor = spec.Cause.User
		}
		commitAction := git.CommitAction{
			Author: commitAuthor,
			Message: policyCommitMessage(updates, spec.Cause),
		}
		if err := working.CommitAndPush(ctx, commitAction, &note{JobID: jobID, Spec: spec}, d.ManifestGenerationEnabled); err != nil {
			// On the chance pushing failed because it was not
			// possible to fast-forward, ask for a sync so the
			// next attempt is more likely to succeed.
			d.AskForSync()
			return result, err
		}
		if anythingAutomated {
			d.AskForAutomatedWorkloadImageUpdates()
		}

		var err error
		result.Revision, err = working.HeadRevision(ctx)
		if err != nil {
			return result, err
		}
		return result, nil
	}
}

func (d *Daemon) release(spec update.Spec, c release.Changes) updateFunc {
	return func(ctx context.Context, jobID job.ID, working *git.Checkout, logger log.Logger) (job.Result, error) {
		var zero job.Result
		rs, err := d.getManifestStore(working)
		if err != nil {
			return zero, err
		}
		rc := release.NewReleaseContext(d.Cluster, rs, d.Registry)
		result, err := release.Release(ctx, rc, c, logger)
		if err != nil {
			return zero, err
		}

		var revision string
		if c.ReleaseKind() == update.ReleaseKindExecute {
			commitMsg := spec.Cause.Message
			if commitMsg == "" {
				commitMsg = c.CommitMessage(result)
			}
			commitAuthor := ""
			if d.GitConfig.SetAuthor {
				commitAuthor = spec.Cause.User
			}
			commitAction := git.CommitAction{
				Author: commitAuthor,
				Message: commitMsg,
			}
			if err := working.CommitAndPush(ctx, commitAction, &note{JobID: jobID, Spec: spec, Result: result}, d.ManifestGenerationEnabled); err != nil {
				// On the chance pushing failed because it was not
				// possible to fast-forward, ask the repo to fetch
				// from upstream ASAP, so the next attempt is more
				// likely to succeed.
				d.Repo.Notify()
				return zero, err
			}
			revision, err = working.HeadRevision(ctx)
			if err != nil {
				return zero, err
			}
		}
		return job.Result{
			Revision: revision,
			Spec: &spec,
			Result: result,
		}, nil
	}
}

// NotifyChange tells the daemon to synchronise the cluster with the manifests
// in the git repo. It has an error return value because upstream there may be
// comms difficulties or other sources of problems; here, we always succeed
// because it's just bookkeeping.
func (d *Daemon) NotifyChange(ctx context.Context, change v9.Change) error {
	switch change.Kind {
	case v9.GitChange:
		gitUpdate := change.Source.(v9.GitUpdate)
		if gitUpdate.URL != d.Repo.Origin().URL && gitUpdate.Branch != d.GitConfig.Branch {
			// It isn't strictly an _error_ to be notified about a repo/branch pair
			// that isn't ours, but it's worth logging anyway for debugging.
			d.Logger.Log("msg", "notified about unrelated change",
				"url", gitUpdate.URL,
				"branch", gitUpdate.Branch)
			break
		}
		d.Repo.Notify()
	case v9.ImageChange:
		imageUpdate := change.Source.(v9.ImageUpdate)
		d.ImageRefresh <- imageUpdate.Name
	}
	return nil
}
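
// exampleNotifyGitPush is an illustrative sketch, not called anywhere: it
// shows the payload shape NotifyChange expects for a git push notification.
// Only the Kind, Source, URL and Branch fields used above are relied upon;
// the url and branch arguments are hypothetical inputs.
func exampleNotifyGitPush(ctx context.Context, d *Daemon, url, branch string) error {
	return d.NotifyChange(ctx, v9.Change{
		Kind: v9.GitChange,
		Source: v9.GitUpdate{URL: url, Branch: branch},
	})
}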

// JobStatus - Ask the daemon how far it's got committing things; in particular, is the job
// queued? running? committed? If it is done, the commit ref is returned.
func (d *Daemon) JobStatus(ctx context.Context, jobID job.ID) (job.Status, error) {
	// Is the job queued, running, or recently finished?
	status, ok := d.JobStatusCache.Status(jobID)
	if ok {
		return status, nil
	}

	// Look through the commits for a note referencing this job. This
	// means that even if fluxd restarts, we will at least remember
	// jobs which have pushed a commit.
	notes, err := d.Repo.NoteRevList(ctx, d.GitConfig.NotesRef)
	if err != nil {
		return status, errors.Wrap(err, "enumerating commit notes")
	}
	commits, err := d.Repo.CommitsBefore(ctx, "HEAD", d.GitConfig.Paths...)
	if err != nil {
		return status, errors.Wrap(err, "checking revisions for status")
	}
	for _, commit := range commits {
		if _, ok := notes[commit.Revision]; ok {
			var n note
			ok, err := d.Repo.GetNote(ctx, commit.Revision, d.GitConfig.NotesRef, &n)
			if ok && err == nil && n.JobID == jobID {
				status = job.Status{
					StatusString: job.StatusSucceeded,
					Result: job.Result{
						Revision: commit.Revision,
						Spec: &n.Spec,
						Result: n.Result,
					},
				}
				return status, nil
			}
		}
	}

	return status, unknownJobError(jobID)
}

// Ask the daemon how far it's got applying things; in particular, is it
// past the given commit? Return the list of commits between where
// we have applied (the sync tag) and the ref given, inclusive. E.g., if you send HEAD,
// you'll get all the commits yet to be applied. If you send a hash
// and it's applied at or _past_ it, you'll get an empty list.
func (d *Daemon) SyncStatus(ctx context.Context, commitRef string) ([]string, error) {
	syncMarkerRevision, err := d.SyncState.GetRevision(ctx)
	if err != nil {
		return nil, err
	}

	commits, err := d.Repo.CommitsBetween(ctx, syncMarkerRevision, commitRef, d.GitConfig.Paths...)
	if err != nil {
		return nil, err
	}
	// NB we could use the messages too if we decide to change the
	// signature of the API to include it.
	revs := make([]string, len(commits))
	for i, commit := range commits {
		revs[i] = commit.Revision
	}
	return revs, nil
}
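
// examplePendingCommits is an illustrative sketch, not called anywhere: it
// uses SyncStatus above to count how many commits on the branch have yet to
// be applied to the cluster, by asking for the range up to HEAD.
func examplePendingCommits(ctx context.Context, d *Daemon) (int, error) {
	revs, err := d.SyncStatus(ctx, "HEAD")
	if err != nil {
		return 0, err
	}
	return len(revs), nil
}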

func (d *Daemon) GitRepoConfig(ctx context.Context, regenerate bool) (v6.GitConfig, error) {
	publicSSHKey, err := d.Cluster.PublicSSHKey(regenerate)
	if err != nil {
		return v6.GitConfig{}, err
	}

	origin := d.Repo.Origin()
	status, _ := d.Repo.Status()
	path := ""
	if len(d.GitConfig.Paths) > 0 {
		path = strings.Join(d.GitConfig.Paths, ",")
	}
	return v6.GitConfig{
		Remote: v6.GitRemoteConfig{
			URL: origin.URL,
			Branch: d.GitConfig.Branch,
			Path: path,
		},
		PublicSSHKey: publicSSHKey,
		Status: status,
	}, nil
}

// Non-api.Server methods

// WithWorkingClone applies the given func to a fresh, writable clone
// of the git repo, and cleans it up afterwards. This may return an
// error in the case that the repo is read-only; use
// `WithReadonlyClone` if you only need to read the files in the git
// repo.
func (d *Daemon) WithWorkingClone(ctx context.Context, fn func(*git.Checkout) error) error {
	co, err := d.Repo.Clone(ctx, d.GitConfig)
	if err != nil {
		return err
	}
	defer co.Clean()
	return fn(co)
}

// WithReadonlyClone applies the given func to an export of the
// current revision of the git repo. Use this if you just need to
// consult the files.
func (d *Daemon) WithReadonlyClone(ctx context.Context, fn func(*git.Export) error) error {
	head, err := d.Repo.BranchHead(ctx)
	if err != nil {
		return err
	}
	co, err := d.Repo.Export(ctx, head)
	if err != nil {
		return err
	}
	defer co.Clean()
	return fn(co)
}

func (d *Daemon) LogEvent(ev event.Event) error {
	if d.EventWriter == nil {
		d.Logger.Log("event", ev, "logupstream", "false")
		return nil
	}
	d.Logger.Log("event", ev, "logupstream", "true")
	return d.EventWriter.LogEvent(ev)
}

// vvv helpers vvv

func containers2containers(cs []resource.Container) []v6.Container {
	res := make([]v6.Container, len(cs))
	for i, c := range cs {
		res[i] = v6.Container{
			Name: c.Name,
			Current: image.Info{
				ID: c.Image,
			},
		}
	}
	return res
}

// Much of the time, images will be sorted by timestamp. At marginal
// cost, we cache the result of sorting, so that other uses of the
// image can reuse it (if they are also sorted by timestamp).
type sortedImageRepo struct {
	images []image.Info
	imagesByTag map[string]image.Info
	imagesSortedByCreated update.SortedImageInfos
}

func (r *sortedImageRepo) SortedImages(p policy.Pattern) update.SortedImageInfos {
	// RequiresTimestamp means "ordered by timestamp" (it's required
	// because no comparison to see which image is newer can be made
	// if a timestamp is missing)
	if p.RequiresTimestamp() {
		if r.imagesSortedByCreated == nil {
			r.imagesSortedByCreated = update.SortImages(r.images, p)
		}
		return r.imagesSortedByCreated
	}
	return update.SortImages(r.images, p)
}

func (r *sortedImageRepo) Images() []image.Info {
	return r.images
}

func (r *sortedImageRepo) ImageByTag(tag string) image.Info {
	return r.imagesByTag[tag]
}

func getWorkloadContainers(workload cluster.Workload, imageRepos update.ImageRepos, resource resource.Resource, fields []string) (res []v6.Container, err error) {
	repos := map[image.Name]*sortedImageRepo{}

	for _, c := range workload.ContainersOrNil() {
		imageName := c.Image.Name
		var policies policy.Set
		if resource != nil {
			policies = resource.Policies()
		}
		tagPattern := policy.GetTagPattern(policies, c.Name)

		imageRepo, ok := repos[imageName]
		if !ok {
			repoMetadata := imageRepos.GetRepositoryMetadata(imageName)
			var images []image.Info
			// Build images, tolerating tags with missing metadata
			for _, tag := range repoMetadata.Tags {
				info, ok := repoMetadata.Images[tag]
				if !ok {
					info = image.Info{
						ID: image.Ref{Tag: tag},
					}
				}
				images = append(images, info)
			}
			imageRepo = &sortedImageRepo{images: images, imagesByTag: repoMetadata.Images}
			repos[imageName] = imageRepo
		}

		currentImage := imageRepo.ImageByTag(c.Image.Tag)

		container, err := v6.NewContainer(c.Name, imageRepo, currentImage, tagPattern, fields)
		if err != nil {
			return res, err
		}
		res = append(res, container)
	}

	return res, nil
}

func policyCommitMessage(us resource.PolicyUpdates, cause update.Cause) string {
	// shortcut, since we want roughly the same information
	events := policyEvents(us, time.Now())
	commitMsg := &bytes.Buffer{}
	prefix := "- "
	switch {
	case cause.Message != "":
		fmt.Fprintf(commitMsg, "%s\n\n", cause.Message)
	case len(events) > 1:
		fmt.Fprintf(commitMsg, "Updated workload policies\n\n")
	default:
		prefix = ""
	}

	for _, event := range events {
		fmt.Fprintf(commitMsg, "%s%v\n", prefix, event)
	}
	return commitMsg.String()
}

// policyEvents builds a map of events (by type), for all the events in this set of
// updates. There will be one event per type, containing all workload ids
// affected by that event; e.g., all automated workloads will share an event.
func policyEvents(us resource.PolicyUpdates, now time.Time) map[string]event.Event {
	eventsByType := map[string]event.Event{}
	for workloadID, update := range us {
		for _, eventType := range policyEventTypes(update) {
			e, ok := eventsByType[eventType]
			if !ok {
				e = event.Event{
					ServiceIDs: []resource.ID{},
					Type: eventType,
					StartedAt: now,
					EndedAt: now,
					LogLevel: event.LogLevelInfo,
				}
			}
			e.ServiceIDs = append(e.ServiceIDs, workloadID)
			eventsByType[eventType] = e
		}
	}
	return eventsByType
}

// policyEventTypes is a deduped list of all event types this update contains
func policyEventTypes(u resource.PolicyUpdate) []string {
	types := map[string]struct{}{}
	for p := range u.Add {
		switch {
		case p == policy.Automated:
			types[event.EventAutomate] = struct{}{}
		case p == policy.Locked:
			types[event.EventLock] = struct{}{}
		default:
			types[event.EventUpdatePolicy] = struct{}{}
		}
	}

	for p := range u.Remove {
		switch {
		case p == policy.Automated:
			types[event.EventDeautomate] = struct{}{}
		case p == policy.Locked:
			types[event.EventUnlock] = struct{}{}
		default:
			types[event.EventUpdatePolicy] = struct{}{}
		}
	}
	var result []string
	for t := range types {
		result = append(result, t)
	}
	sort.Strings(result)
	return result
}

// latestValidRevision returns the HEAD of the configured branch if it
// has a valid signature, or the SHA of the latest valid commit it
// could find, plus the invalid commit thereafter.
//
// Signature validation happens for commits between the revision of the
// sync tag and the HEAD, after the signature of the sync tag itself
// has been validated, as the branch cannot be trusted when the tag
// originates from an unknown source.
//
// In case the signature of the tag cannot be verified, or it points
// towards a revision we cannot get a commit range for, it returns an
// error.
func latestValidRevision(ctx context.Context, repo *git.Repo, syncState sync.State) (string, git.Commit, error) {
	var invalidCommit = git.Commit{}
	newRevision, err := repo.BranchHead(ctx)
	if err != nil {
		return "", invalidCommit, err
	}

	// Validate sync state and retrieve the revision it points to
	tagRevision, err := syncState.GetRevision(ctx)
	if err != nil {
		return "", invalidCommit, err
	}

	var commits []git.Commit
	if tagRevision == "" {
		commits, err = repo.CommitsBefore(ctx, newRevision)
	} else {
		// Ensure the commit _at_ the high water mark is a signed and valid commit
		if err = repo.VerifyCommit(ctx, tagRevision); err != nil {
			return "", invalidCommit, errors.Wrap(err, "failed to verify signature of last sync'ed revision")
		}
		commits, err = repo.CommitsBetween(ctx, tagRevision, newRevision)
	}
	if err != nil {
		return tagRevision, invalidCommit, err
	}

	// Loop through commits in ascending order, validating the
	// signature of each commit. In case we hit an invalid commit, we
	// return the revision of the commit before that, as that one is
	// valid.
	for i := len(commits) - 1; i >= 0; i-- {
		if !commits[i].Signature.Valid() {
			if i+1 < len(commits) {
				return commits[i+1].Revision, commits[i], nil
			}
			return tagRevision, commits[i], nil
		}
	}
	return newRevision, invalidCommit, nil
}

// verifyWorkingRepo checks that a working clone is safe to be used for a write operation
func verifyWorkingRepo(ctx context.Context, repo *git.Repo, working *git.Checkout, syncState sync.State) error {
	if latestVerifiedRev, _, err := latestValidRevision(ctx, repo, syncState); err != nil {
		return err
	} else if headRev, err := working.HeadRevision(ctx); err != nil {
		return err
	} else if headRev != latestVerifiedRev {
		return unsignedHeadRevisionError(latestVerifiedRev, headRev)
	}
	return nil
}