diff --git a/cmd/entrypoint/main.go b/cmd/entrypoint/main.go index 71f2fe232ad..5bd73c3e8cf 100644 --- a/cmd/entrypoint/main.go +++ b/cmd/entrypoint/main.go @@ -29,22 +29,26 @@ import ( ) var ( - ep = flag.String("entrypoint", "", "Original specified entrypoint to execute") - waitFile = flag.String("wait_file", "", "If specified, file to wait for") - postFile = flag.String("post_file", "", "If specified, file to write upon completion") + ep = flag.String("entrypoint", "", "Original specified entrypoint to execute") + waitFile = flag.String("wait_file", "", "If specified, file to wait for") + waitFileContent = flag.Bool("wait_file_content", false, "If specified, expect wait_file to have content") + postFile = flag.String("post_file", "", "If specified, file to write upon completion") + + waitPollingInterval = time.Second ) func main() { flag.Parse() e := entrypoint.Entrypointer{ - Entrypoint: *ep, - WaitFile: *waitFile, - PostFile: *postFile, - Args: flag.Args(), - Waiter: &RealWaiter{}, - Runner: &RealRunner{}, - PostWriter: &RealPostWriter{}, + Entrypoint: *ep, + WaitFile: *waitFile, + WaitFileContent: *waitFileContent, + PostFile: *postFile, + Args: flag.Args(), + Waiter: &RealWaiter{}, + Runner: &RealRunner{}, + PostWriter: &RealPostWriter{}, } if err := e.Go(); err != nil { switch err.(type) { @@ -75,18 +79,27 @@ type RealWaiter struct{} var _ entrypoint.Waiter = (*RealWaiter)(nil) -func (*RealWaiter) Wait(file string) error { +// Wait watches a file and returns when either a) the file exists and, if +// the expectContent argument is true, the file has non-zero size or b) there +// is an error polling the file. +// +// If the passed-in file is an empty string then this function returns +// immediately. +// +// If a file of the same name with a ".err" extension exists then this Wait +// will end with a skipError. 
+func (*RealWaiter) Wait(file string, expectContent bool) error { if file == "" { return nil } - for ; ; time.Sleep(time.Second) { - // Watch for the post file - if _, err := os.Stat(file); err == nil { - return nil + for ; ; time.Sleep(waitPollingInterval) { + if info, err := os.Stat(file); err == nil { + if !expectContent || info.Size() > 0 { + return nil + } } else if !os.IsNotExist(err) { return xerrors.Errorf("Waiting for %q: %w", file, err) } - // Watch for the post error file if _, err := os.Stat(file + ".err"); err == nil { return skipError("error file present, bail and skip the step") } diff --git a/cmd/entrypoint/main_test.go b/cmd/entrypoint/main_test.go new file mode 100644 index 00000000000..05ad1d97050 --- /dev/null +++ b/cmd/entrypoint/main_test.go @@ -0,0 +1,106 @@ +package main + +import ( + "io/ioutil" + "os" + "testing" + "time" +) + +func TestRealWaiterWaitMissingFile(t *testing.T) { + // Create a temp file and then immediately delete it to get + // a legitimate tmp path and ensure the file doesn't exist + // prior to testing Wait(). 
+ tmp, err := ioutil.TempFile("", "real_waiter_test_file") + if err != nil { + t.Errorf("error creating temp file: %v", err) + } + os.Remove(tmp.Name()) + rw := RealWaiter{} + doneCh := make(chan struct{}) + go func() { + err := rw.Wait(tmp.Name(), false) + if err != nil { + t.Errorf("error waiting on tmp file %q", tmp.Name()) + } + close(doneCh) + }() + select { + case <-doneCh: + t.Errorf("did not expect Wait() to have detected a file at path %q", tmp.Name()) + case <-time.After(2 * waitPollingInterval): + // Success + } +} + +func TestRealWaiterWaitWithFile(t *testing.T) { + tmp, err := ioutil.TempFile("", "real_waiter_test_file") + if err != nil { + t.Errorf("error creating temp file: %v", err) + } + defer os.Remove(tmp.Name()) + rw := RealWaiter{} + doneCh := make(chan struct{}) + go func() { + err := rw.Wait(tmp.Name(), false) + if err != nil { + t.Errorf("error waiting on tmp file %q", tmp.Name()) + } + close(doneCh) + }() + select { + case <-doneCh: + // Success + case <-time.After(2 * waitPollingInterval): + t.Errorf("expected Wait() to have detected the file's existence by now") + } +} + +func TestRealWaiterWaitMissingContent(t *testing.T) { + tmp, err := ioutil.TempFile("", "real_waiter_test_file") + if err != nil { + t.Errorf("error creating temp file: %v", err) + } + defer os.Remove(tmp.Name()) + rw := RealWaiter{} + doneCh := make(chan struct{}) + go func() { + err := rw.Wait(tmp.Name(), true) + if err != nil { + t.Errorf("error waiting on tmp file %q", tmp.Name()) + } + close(doneCh) + }() + select { + case <-doneCh: + t.Errorf("no data was written to tmp file, did not expect Wait() to have detected a non-zero file size and returned") + case <-time.After(2 * waitPollingInterval): + // Success + } +} + +func TestRealWaiterWaitWithContent(t *testing.T) { + tmp, err := ioutil.TempFile("", "real_waiter_test_file") + if err != nil { + t.Errorf("error creating temp file: %v", err) + } + defer os.Remove(tmp.Name()) + rw := RealWaiter{} + doneCh := make(chan 
struct{}) + go func() { + err := rw.Wait(tmp.Name(), true) + if err != nil { + t.Errorf("error waiting on tmp file %q", tmp.Name()) + } + close(doneCh) + }() + if err := ioutil.WriteFile(tmp.Name(), []byte("😺"), 0700); err != nil { + t.Errorf("error writing content to temp file: %v", err) + } + select { + case <-doneCh: + // Success + case <-time.After(2 * waitPollingInterval): + t.Errorf("expected Wait() to have detected a non-zero file size by now") + } +} diff --git a/cmd/nop/main.go b/cmd/nop/main.go index befa032366b..8d87dd6e3cc 100644 --- a/cmd/nop/main.go +++ b/cmd/nop/main.go @@ -14,6 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ +// The nop command is a no-op, it simply prints a message and exits. Nop +// is used to stop sidecar containers in TaskRun Pods. When a Task's Steps +// are complete any sidecars running alongside the Step containers need +// to be terminated. Whatever image the sidecars are running is replaced +// with nop and the sidecar quickly exits. + package main import "fmt" diff --git a/docs/developers/README.md b/docs/developers/README.md index 77c0da17e5c..29654e2a964 100644 --- a/docs/developers/README.md +++ b/docs/developers/README.md @@ -137,6 +137,7 @@ manage the execution order of the containers. The `entrypoint` binary has the following arguments: - `wait_file` - If specified, file to wait for +- `wait_file_content` - If specified, wait until the file has non-zero size - `post_file` - If specified, file to write upon completion - `entrypoint` - The command to run in the image being wrapped @@ -155,3 +156,32 @@ such as the following: - The environment variable HOME is set to `/builder/home`, used by the builder tools and injected on into all of the step containers - Default location for output-images `/builder/output-images` + +## Handling of injected sidecars + +Tekton has to take some special steps to support sidecars that are injected into +TaskRun Pods. 
Without intervention sidecars will typically run for the entire +lifetime of a Pod but in Tekton's case it's desirable for the sidecars to run +only as long as Steps take to complete. There's also a need for Tekton to +schedule the sidecars to start before a Task's Steps begin, just in case the +Steps rely on a sidecars behaviour, for example to join an Istio service mesh. +To handle all of this, Tekton Pipelines implements the following lifecycle +for sidecar containers: + +First, the [Downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#the-downward-api) +is used to project an annotation on the TaskRun's Pod into the `entrypoint` +container as a file. The annotation starts as an empty string, so the file +projected by the downward API has zero length. The entrypointer spins, waiting +for that file to have non-zero size. + +The sidecar containers start up. Once they're all in a ready state, the +annotation is populated with string "READY", which in turn populates the +Downward API projected file. The entrypoint binary recognizes +that the projected file has a non-zero size and allows the Task's steps to +begin. + +On completion of all steps in a Task the TaskRun reconciler stops any +sidecar containers. The `Image` field of any sidecar containers is swapped +to the nop image. Kubernetes observes the change and relaunches the container +with updated container image. The nop container image exits. The container +is considered `Terminated` by Kubernetes and the TaskRun's Pod stops. diff --git a/docs/taskruns.md b/docs/taskruns.md index d0f13e1fbcc..8d80a0a5f3f 100644 --- a/docs/taskruns.md +++ b/docs/taskruns.md @@ -20,6 +20,7 @@ A `TaskRun` runs until all `steps` have completed or until a failure occurs. - [Steps](#steps) - [Cancelling a TaskRun](#cancelling-a-taskrun) - [Examples](#examples) +- [Sidecars](#sidecars) - [Logs](logs.md) --- @@ -544,6 +545,29 @@ of the `Task` resource object. 
For examples and more information about specifying service accounts, see the [`ServiceAccount`](./auth.md) reference topic. +## Sidecars + +A well-established pattern in Kubernetes is that of the "sidecar" - a +container which runs alongside your workloads to provide ancillary support. +Typical examples of the sidecar pattern are logging daemons, services to +update files on a shared volume, and network proxies. + +Tekton doesn't provide a mechanism to specify sidecars for Task steps +but it's still possible for sidecars to be added to your Pods: +[Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) +provide cluster admins a mechanism to inject sidecar containers as Pods launch. +As a concrete example this is one possible method [used by Istio](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/#automatic-sidecar-injection) +to inject an envoy proxy in to pods so that they can be included as part of +Istio's service mesh. + +Tekton will happily work with sidecars injected into a TaskRun's +pods but the behaviour is a bit nuanced: When TaskRun's steps are complete +any sidecar containers running inside the Pod will be terminated. In +order to terminate the sidecars they will be restarted with a new +"nop" image that quickly exits. The result will be that your TaskRun's +Pod will include the sidecar container with a Retry Count of 1 and +with a different container image than you might be expecting. + --- Except as otherwise noted, the content of this page is licensed under the diff --git a/pkg/entrypoint/entrypointer.go b/pkg/entrypoint/entrypointer.go index 9c76b11fa3e..b6a7d784105 100644 --- a/pkg/entrypoint/entrypointer.go +++ b/pkg/entrypoint/entrypointer.go @@ -30,6 +30,9 @@ type Entrypointer struct { // WaitFile is the file to wait for. If not specified, execution begins // immediately. 
WaitFile string + // WaitFileContent indicates the WaitFile should have non-zero size + // before continuing with execution. + WaitFileContent bool // PostFile is the file to write when complete. If not specified, no // file is written. PostFile string @@ -45,7 +48,7 @@ type Entrypointer struct { // Waiter encapsulates waiting for files to exist. type Waiter interface { // Wait blocks until the specified file exists. - Wait(file string) error + Wait(file string, expectContent bool) error } // Runner encapsulates running commands. @@ -63,7 +66,7 @@ type PostWriter interface { // post file. func (e Entrypointer) Go() error { if e.WaitFile != "" { - if err := e.Waiter.Wait(e.WaitFile); err != nil { + if err := e.Waiter.Wait(e.WaitFile, e.WaitFileContent); err != nil { // An error happened while waiting, so we bail // *but* we write postfile to make next steps bail too e.WritePostFile(e.PostFile, err) diff --git a/pkg/entrypoint/entrypointer_test.go b/pkg/entrypoint/entrypointer_test.go index 3fcf0cfd440..4b413ca0e59 100644 --- a/pkg/entrypoint/entrypointer_test.go +++ b/pkg/entrypoint/entrypointer_test.go @@ -175,7 +175,7 @@ func TestEntrypointer(t *testing.T) { type fakeWaiter struct{ waited *string } -func (f *fakeWaiter) Wait(file string) error { +func (f *fakeWaiter) Wait(file string, expectContent bool) error { f.waited = &file return nil } @@ -193,7 +193,7 @@ func (f *fakePostWriter) Write(file string) { f.wrote = &file } type fakeErrorWaiter struct{ waited *string } -func (f *fakeErrorWaiter) Wait(file string) error { +func (f *fakeErrorWaiter) Wait(file string, expectContent bool) error { f.waited = &file return xerrors.New("waiter failed") } diff --git a/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint.go b/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint.go index 381e8458ddd..c0af92c4ec4 100644 --- a/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint.go +++ b/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint.go @@ -37,18 +37,25 @@ import ( 
const ( // MountName is the name of the pvc being mounted (which // will contain the entrypoint binary and eventually the logs) - MountName = "tools" - MountPoint = "/builder/tools" - BinaryLocation = MountPoint + "/entrypoint" - JSONConfigEnvVar = "ENTRYPOINT_OPTIONS" - InitContainerName = "place-tools" - cacheSize = 1024 + MountName = "tools" + MountPoint = "/builder/tools" + DownwardMountName = "downward" + DownwardMountPoint = "/builder/downward" + DownwardMountReadyFile = "ready" + BinaryLocation = MountPoint + "/entrypoint" + JSONConfigEnvVar = "ENTRYPOINT_OPTIONS" + InitContainerName = "place-tools" + cacheSize = 1024 ) var toolsMount = corev1.VolumeMount{ Name: MountName, MountPath: MountPoint, } +var downwardMount = corev1.VolumeMount{ + Name: DownwardMountName, + MountPath: DownwardMountPoint, +} var ( entrypointImage = flag.String("entrypoint-image", "override-with-entrypoint:latest", "The container image containing our entrypoint binary.") @@ -135,16 +142,19 @@ func RedirectStep(cache *Cache, stepNum int, step *corev1.Container, kubeclient step.Args = GetArgs(stepNum, step.Command, step.Args) step.Command = []string{BinaryLocation} step.VolumeMounts = append(step.VolumeMounts, toolsMount) + // The first step in a Task waits for the existence of a file projected into the Pod + // from the Downward API. That file will be populated only when all sidecars are ready, + // thereby ensuring that steps don't start executing while helper sidecars are still pending. + if stepNum == 0 { + step.VolumeMounts = append(step.VolumeMounts, downwardMount) + } return nil } // GetArgs returns the arguments that should be specified for the step which has been wrapped // such that it will execute our custom entrypoint instead of the user provided Command and Args. 
func GetArgs(stepNum int, commands, args []string) []string { - waitFile := fmt.Sprintf("%s/%s", MountPoint, strconv.Itoa(stepNum-1)) - if stepNum == 0 { - waitFile = "" - } + waitFile := getWaitFile(stepNum) // The binary we want to run must be separated from its arguments by -- // so if commands has more than one value, we'll move the other values // into the arg list so we can separate them @@ -152,17 +162,28 @@ func GetArgs(stepNum int, commands, args []string) []string { args = append(commands[1:], args...) commands = commands[:1] } - argsForEntrypoint := append([]string{ + argsForEntrypoint := []string{ "-wait_file", waitFile, - "-post_file", fmt.Sprintf("%s/%s", MountPoint, strconv.Itoa(stepNum)), - "-entrypoint"}, - commands..., - ) + "-post_file", getWaitFile(stepNum + 1), + } + if stepNum == 0 { + argsForEntrypoint = append(argsForEntrypoint, "-wait_file_content") + } + argsForEntrypoint = append(argsForEntrypoint, "-entrypoint") + argsForEntrypoint = append(argsForEntrypoint, commands...) // TODO: what if Command has multiple elements, do we need "--" between command and args? argsForEntrypoint = append(argsForEntrypoint, "--") return append(argsForEntrypoint, args...) } +func getWaitFile(stepNum int) string { + if stepNum == 0 { + return fmt.Sprintf("%s/%s", DownwardMountPoint, DownwardMountReadyFile) + } + + return fmt.Sprintf("%s/%s", MountPoint, strconv.Itoa(stepNum-1)) +} + // GetRemoteEntrypoint accepts a cache of digest lookups, as well as the digest // to look for. 
If the cache does not contain the digest, it will lookup the // metadata from the images registry, and then commit that to the cache diff --git a/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint_test.go b/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint_test.go index ddc91669a24..887a327d22d 100644 --- a/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint_test.go +++ b/pkg/reconciler/v1alpha1/taskrun/entrypoint/entrypoint_test.go @@ -93,8 +93,9 @@ func TestGetArgs(t *testing.T) { commands: []string{"echo"}, args: []string{"hello", "world"}, expectedArgs: []string{ - "-wait_file", "", + "-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", + "-wait_file_content", "-entrypoint", "echo", "--", "hello", "world", diff --git a/pkg/reconciler/v1alpha1/taskrun/resources/pod.go b/pkg/reconciler/v1alpha1/taskrun/resources/pod.go index 107e7292150..939f02e0cd0 100644 --- a/pkg/reconciler/v1alpha1/taskrun/resources/pod.go +++ b/pkg/reconciler/v1alpha1/taskrun/resources/pod.go @@ -87,22 +87,21 @@ var ( ) const ( - // Prefixes to add to the name of the containers. + // Prefixes to add to the name of the init containers. containerPrefix = "step-" unnamedInitContainerPrefix = "step-unnamed-" // Name of the credential initialization container. credsInit = "credential-initializer" // Name of the working dir initialization container. - workingDirInit = "working-dir-initializer" + workingDirInit = "working-dir-initializer" + ReadyAnnotation = "tekton.dev/ready" + readyAnnotationValue = "READY" ) var ( // The container used to initialize credentials before the build runs. credsImage = flag.String("creds-image", "override-with-creds:latest", "The container image for preparing our Build's credentials.") - // The container that just prints Task completed successfully. 
- nopImage = flag.String("nop-image", "override-with-nop:latest", - "The container image run at the end of the build to log task success") ) func makeCredentialInitializer(serviceAccountName, namespace string, kubeclient kubernetes.Interface) (*corev1.Container, []corev1.Volume, error) { @@ -290,12 +289,6 @@ func MakePod(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec, kubeclient k } gibberish := hex.EncodeToString(b) - nopContainer := &corev1.Container{Name: "nop", Image: *nopImage, Command: []string{"/ko-app/nop"}} - if err := entrypoint.RedirectStep(cache, len(podContainers), nopContainer, kubeclient, taskRun, logger); err != nil { - return nil, err - } - podContainers = append(podContainers, *nopContainer) - mergedInitContainers, err := merge.CombineStepsWithContainerTemplate(taskSpec.ContainerTemplate, initContainers) if err != nil { return nil, err @@ -335,6 +328,28 @@ func MakePod(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec, kubeclient k }, nil } +type UpdatePod func(*corev1.Pod) (*corev1.Pod, error) + +// AddReadyAnnotation adds the ready annotation if it is not present. +// Returns any error that comes back from the passed-in update func. +func AddReadyAnnotation(p *corev1.Pod, update UpdatePod) error { + if p.ObjectMeta.Annotations == nil { + p.ObjectMeta.Annotations = make(map[string]string) + } + if p.ObjectMeta.Annotations[ReadyAnnotation] != readyAnnotationValue { + p.ObjectMeta.Annotations[ReadyAnnotation] = readyAnnotationValue + _, err := update(p) + + return err + } + + return nil +} + +func IsContainerStep(name string) bool { + return strings.HasPrefix(name, containerPrefix) +} + // makeLabels constructs the labels we will propagate from TaskRuns to Pods. 
func makeLabels(s *v1alpha1.TaskRun) map[string]string { labels := make(map[string]string, len(s.ObjectMeta.Labels)+1) @@ -345,13 +360,14 @@ func makeLabels(s *v1alpha1.TaskRun) map[string]string { return labels } -// makeAnnotations constructs the annotations we will propagate from TaskRuns to Pods. +// makeAnnotations constructs the annotations we will propagate from TaskRuns to Pods +// and adds any other annotations that will be needed to initialize a Pod. func makeAnnotations(s *v1alpha1.TaskRun) map[string]string { annotations := make(map[string]string, len(s.ObjectMeta.Annotations)+1) for k, v := range s.ObjectMeta.Annotations { annotations[k] = v } - annotations["sidecar.istio.io/inject"] = "false" + annotations[ReadyAnnotation] = "" return annotations } diff --git a/pkg/reconciler/v1alpha1/taskrun/resources/pod_test.go b/pkg/reconciler/v1alpha1/taskrun/resources/pod_test.go index 4112d293df7..d029f8752a3 100644 --- a/pkg/reconciler/v1alpha1/taskrun/resources/pod_test.go +++ b/pkg/reconciler/v1alpha1/taskrun/resources/pod_test.go @@ -41,16 +41,6 @@ var ( resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool { return x.Cmp(y) == 0 }) - nopContainer = corev1.Container{ - Name: "nop", - Image: *nopImage, - Command: []string{"/builder/tools/entrypoint"}, - Args: []string{"-wait_file", "/builder/tools/0", "-post_file", "/builder/tools/1", "-entrypoint", "/ko-app/nop", "--"}, - VolumeMounts: []corev1.VolumeMount{{ - Name: entrypoint.MountName, - MountPath: entrypoint.MountPoint, - }}, - } ) func TestTryGetPod(t *testing.T) { @@ -173,7 +163,6 @@ func TestMakePod(t *testing.T) { }, }, }, - nopContainer, }, Volumes: implicitVolumes, }, @@ -219,7 +208,6 @@ func TestMakePod(t *testing.T) { }, }, }, - nopContainer, }, Volumes: implicitVolumesWithSecrets, }, @@ -259,7 +247,6 @@ func TestMakePod(t *testing.T) { }, }, }, - nopContainer, }, Volumes: implicitVolumes, }, @@ -299,7 +286,6 @@ func TestMakePod(t *testing.T) { }, }, }, - nopContainer, }, Volumes: 
implicitVolumes, }, @@ -345,7 +331,6 @@ func TestMakePod(t *testing.T) { }, }, }, - nopContainer, }, Volumes: implicitVolumes, }, @@ -397,7 +382,7 @@ func TestMakePod(t *testing.T) { t.Errorf("Diff spec:\n%s", d) } - wantAnnotations := map[string]string{"sidecar.istio.io/inject": "false"} + wantAnnotations := map[string]string{ReadyAnnotation: ""} if c.bAnnotations != nil { for key, val := range c.bAnnotations { wantAnnotations[key] = val @@ -435,3 +420,93 @@ func TestMakeWorkingDirScript(t *testing.T) { }) } } + +func TestAddReadyAnnotation(t *testing.T) { + type testcase struct { + desc string + pod *corev1.Pod + updateFunc UpdatePod + } + for _, c := range []testcase{{ + desc: "missing ready annotation is added to provided pod", + pod: &corev1.Pod{}, + updateFunc: func(p *corev1.Pod) (*corev1.Pod, error) { return p, nil }, + }} { + t.Run(c.desc, func(t *testing.T) { + err := AddReadyAnnotation(c.pod, c.updateFunc) + if err != nil { + t.Errorf("error received: %v", err) + } + if v := c.pod.ObjectMeta.Annotations[ReadyAnnotation]; v != readyAnnotationValue { + t.Errorf("Annotation %q=%q missing from Pod", ReadyAnnotation, readyAnnotationValue) + } + }) + } +} + +func TestAddReadyAnnotationUpdateError(t *testing.T) { + type testcase struct { + desc string + pod *corev1.Pod + updateFunc UpdatePod + } + testerror := xerrors.New("error updating pod") + for _, c := range []testcase{{ + desc: "errors experienced during update are returned", + pod: &corev1.Pod{}, + updateFunc: func(p *corev1.Pod) (*corev1.Pod, error) { return p, testerror }, + }} { + t.Run(c.desc, func(t *testing.T) { + err := AddReadyAnnotation(c.pod, c.updateFunc) + if err != testerror { + t.Errorf("expected %v received %v", testerror, err) + } + }) + } +} + +func TestMakeAnnotations(t *testing.T) { + type testcase struct { + desc string + taskRun *v1alpha1.TaskRun + expectedAnnotationSubset map[string]string + } + for _, c := range []testcase{ + { + desc: "a taskruns annotations are copied to the 
pod", + taskRun: &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a-taskrun", + Annotations: map[string]string{ + "foo": "bar", + "baz": "quux", + }, + }, + }, + expectedAnnotationSubset: map[string]string{ + "foo": "bar", + "baz": "quux", + }, + }, + { + desc: "initial pod annotations contain the ReadyAnnotation to pause steps until sidecars are ready", + taskRun: &v1alpha1.TaskRun{}, + expectedAnnotationSubset: map[string]string{ + ReadyAnnotation: "", + }, + }, + } { + t.Run(c.desc, func(t *testing.T) { + annos := makeAnnotations(c.taskRun) + for k, v := range c.expectedAnnotationSubset { + receivedValue, ok := annos[k] + if !ok { + t.Errorf("expected annotation %q was missing", k) + } + if receivedValue != v { + t.Errorf("expected annotation %q=%q, received %q=%q", k, v, k, receivedValue) + } + } + }) + } +} diff --git a/pkg/reconciler/v1alpha1/taskrun/sidecars/stop.go b/pkg/reconciler/v1alpha1/taskrun/sidecars/stop.go new file mode 100644 index 00000000000..d20cc95d32a --- /dev/null +++ b/pkg/reconciler/v1alpha1/taskrun/sidecars/stop.go @@ -0,0 +1,46 @@ +package sidecars + +import ( + "flag" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + nopImage = flag.String("nop-image", "override-with-nop:latest", "The container image used to kill sidecars") +) + +type GetPod func(string, metav1.GetOptions) (*corev1.Pod, error) +type UpdatePod func(*corev1.Pod) (*corev1.Pod, error) + +// Stop stops all sidecar containers inside a pod. A container is considered +// to be a sidecar if it is currently running. This func is only expected to +// be called after a TaskRun completes and all Step containers +// have already stopped. +// +// A sidecar is killed by replacing its current container image with the nop +// image, which in turn quickly exits. If the sidecar defines a command then +// it will exit with a non-zero status. 
When we check for TaskRun success we +// have to check for the containers we care about - not the final Pod status. +func Stop(pod *corev1.Pod, updatePod UpdatePod) error { + updated := false + if pod.Status.Phase == corev1.PodRunning { + for _, s := range pod.Status.ContainerStatuses { + if s.State.Running != nil { + for j, c := range pod.Spec.Containers { + if c.Name == s.Name && c.Image != *nopImage { + updated = true + pod.Spec.Containers[j].Image = *nopImage + } + } + } + } + } + if updated { + if _, err := updatePod(pod); err != nil { + return err + } + } + return nil +} diff --git a/pkg/reconciler/v1alpha1/taskrun/sidecars/stop_test.go b/pkg/reconciler/v1alpha1/taskrun/sidecars/stop_test.go new file mode 100644 index 00000000000..758a8b36c42 --- /dev/null +++ b/pkg/reconciler/v1alpha1/taskrun/sidecars/stop_test.go @@ -0,0 +1,102 @@ +package sidecars + +import ( + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TestStop exercises the Stop() method of the sidecars package. +// A sidecar is killed by having its container image changed to that of the nop image. +// This test therefore runs through a series of pod and sidecar configurations, +// checking whether the resulting image in the sidecar's container matches that expected. 
+func TestStop(t *testing.T) { + testcases := []struct { + description string + podPhase corev1.PodPhase + sidecarContainer corev1.Container + sidecarState corev1.ContainerState + expectedImage string + }{{ + description: "a running sidecar container should be stopped", + podPhase: corev1.PodRunning, + sidecarContainer: corev1.Container{ + Name: "echo-hello", + Image: "echo-hello:latest", + Command: []string{"echo"}, + Args: []string{"hello"}, + }, + sidecarState: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{StartedAt: metav1.NewTime(time.Now())}, + }, + expectedImage: *nopImage, + }, { + description: "a pending pod should not have its sidecars stopped", + podPhase: corev1.PodPending, + sidecarContainer: corev1.Container{ + Name: "echo-hello", + Image: "echo-hello:latest", + Command: []string{"echo"}, + Args: []string{"hello"}, + }, + sidecarState: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{StartedAt: metav1.NewTime(time.Now())}, + }, + expectedImage: "echo-hello:latest", + }, { + description: "a sidecar container that is not in a running state should not be stopped", + podPhase: corev1.PodRunning, + sidecarContainer: corev1.Container{ + Name: "echo-hello", + Image: "echo-hello:latest", + Command: []string{"echo"}, + Args: []string{"hello"}, + }, + sidecarState: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{}, + }, + expectedImage: "echo-hello:latest", + }} + for _, tc := range testcases { + t.Run(tc.description, func(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test_pod", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: nil, + Containers: []corev1.Container{ + { + Name: "test-task-step", + Image: "test-task-step:latest", + Command: []string{"test"}, + }, + tc.sidecarContainer, + }, + }, + Status: corev1.PodStatus{ + Phase: tc.podPhase, + ContainerStatuses: []corev1.ContainerStatus{ + 
corev1.ContainerStatus{}, + corev1.ContainerStatus{ + Name: tc.sidecarContainer.Name, + State: tc.sidecarState, + }, + }, + }, + } + updatePod := func(p *corev1.Pod) (*corev1.Pod, error) { return nil, nil } + if err := Stop(pod, updatePod); err != nil { + t.Errorf("error stopping sidecar: %v", err) + } + sidecarIdx := len(pod.Spec.Containers) - 1 + if pod.Spec.Containers[sidecarIdx].Image != tc.expectedImage { + t.Errorf("expected sidecar image %q, actual: %q", tc.expectedImage, pod.Spec.Containers[sidecarIdx].Image) + } + }) + } +} diff --git a/pkg/reconciler/v1alpha1/taskrun/taskrun.go b/pkg/reconciler/v1alpha1/taskrun/taskrun.go index e54926375ca..161da91ba2f 100644 --- a/pkg/reconciler/v1alpha1/taskrun/taskrun.go +++ b/pkg/reconciler/v1alpha1/taskrun/taskrun.go @@ -33,6 +33,7 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/entrypoint" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources" + "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/sidecars" "github.com/tektoncd/pipeline/pkg/status" "go.uber.org/zap" "golang.org/x/xerrors" @@ -46,33 +47,6 @@ import ( ) const ( - // reasonCouldntGetTask indicates that the reason for the failure status is that the - // Task couldn't be found - reasonCouldntGetTask = "CouldntGetTask" - - // reasonFailedResolution indicated that the reason for failure status is - // that references within the TaskRun could not be resolved - reasonFailedResolution = "TaskRunResolutionFailed" - - // reasonFailedValidation indicated that the reason for failure status is - // that taskrun failed runtime validation - reasonFailedValidation = "TaskRunValidationFailed" - - // reasonRunning indicates that the reason for the inprogress status is that the TaskRun - // is just starting to be reconciled - reasonRunning = "Running" - - // reasonTimedOut indicates that the TaskRun has taken longer than its configured timeout - reasonTimedOut = 
"TaskRunTimeout" - - // reasonExceededResourceQuota indicates that the TaskRun failed to create a pod due to - // a ResourceQuota in the namespace - reasonExceededResourceQuota = "ExceededResourceQuota" - - // reasonExceededNodeResources indicates that the TaskRun's pod has failed to start due - // to resource constraints on the node - reasonExceededNodeResources = "ExceededNodeResources" - // taskRunAgentName defines logging agent name for TaskRun Controller taskRunAgentName = "taskrun-controller" // taskRunControllerName defines name for TaskRun Controller @@ -180,7 +154,16 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { if tr.IsDone() { c.timeoutHandler.Release(tr) - return nil + pod, err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + if err == nil { + err = sidecars.Stop(pod, c.KubeClientSet.CoreV1().Pods(tr.Namespace).Update) + } else if errors.IsNotFound(err) { + return nil + } + if err != nil { + c.Logger.Errorf("Error stopping sidecars for TaskRun %q: %v", name, err) + } + return err } // Reconcile this copy of the task run and then write back any status @@ -249,7 +232,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, - Reason: reasonFailedResolution, + Reason: status.ReasonFailedResolution, Message: err.Error(), }) return nil @@ -288,7 +271,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, - Reason: reasonFailedResolution, + Reason: status.ReasonFailedResolution, Message: err.Error(), }) return nil @@ -299,7 +282,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, - Reason: 
reasonFailedValidation, + Reason: status.ReasonFailedValidation, Message: err.Error(), }) return nil @@ -324,13 +307,13 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error return err } - if isPodExceedingNodeResources(pod) { - c.Recorder.Eventf(tr, corev1.EventTypeWarning, reasonExceededNodeResources, "Insufficient resources to schedule pod %q", pod.Name) + if status.IsPodExceedingNodeResources(pod) { + c.Recorder.Eventf(tr, corev1.EventTypeWarning, status.ReasonExceededNodeResources, "Insufficient resources to schedule pod %q", pod.Name) } before := tr.Status.GetCondition(apis.ConditionSucceeded) - updateStatusFromPod(tr, pod, c.resourceLister, c.KubeClientSet, c.Logger) + addReady := status.UpdateStatusFromPod(tr, pod, c.resourceLister, c.KubeClientSet, c.Logger) status.SortTaskRunStepOrder(tr.Status.Steps, taskSpec.Steps) @@ -338,6 +321,12 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error after := tr.Status.GetCondition(apis.ConditionSucceeded) + if addReady { + if err := c.updateReady(pod); err != nil { + return err + } + } + reconciler.EmitEvent(c.Recorder, before, after, tr) c.Logger.Infof("Successfully reconciled taskrun %s/%s with status: %#v", tr.Name, tr.Namespace, after) @@ -345,82 +334,20 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error return nil } -func updateStatusFromPod(taskRun *v1alpha1.TaskRun, pod *corev1.Pod, resourceLister listers.PipelineResourceLister, kubeclient kubernetes.Interface, logger *zap.SugaredLogger) { - if taskRun.Status.GetCondition(apis.ConditionSucceeded) == nil || taskRun.Status.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { - // If the taskRunStatus doesn't exist yet, it's because we just started running - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: reasonRunning, - Message: reasonRunning, - }) - } - - taskRun.Status.PodName 
= pod.Name - - taskRun.Status.Steps = []v1alpha1.StepState{} - for _, s := range pod.Status.ContainerStatuses { - taskRun.Status.Steps = append(taskRun.Status.Steps, v1alpha1.StepState{ - ContainerState: *s.State.DeepCopy(), - Name: resources.TrimContainerNamePrefix(s.Name), - }) - } - - switch pod.Status.Phase { - case corev1.PodRunning: - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: reasonRunning, - }) - case corev1.PodFailed: - msg := getFailureMessage(pod) - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionFalse, - Message: msg, - }) - // update tr completed time - taskRun.Status.CompletionTime = &metav1.Time{Time: time.Now()} - case corev1.PodPending: - var reason, msg string - if isPodExceedingNodeResources(pod) { - reason = reasonExceededNodeResources - msg = getExceededResourcesMessage(taskRun) - } else { - reason = "Pending" - msg = getWaitingMessage(pod) - } - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: reason, - Message: msg, - }) - case corev1.PodSucceeded: - taskRun.Status.SetCondition(&apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - }) - // update tr completed time - taskRun.Status.CompletionTime = &metav1.Time{Time: time.Now()} - } -} - func (c *Reconciler) handlePodCreationError(tr *v1alpha1.TaskRun, err error) { var reason, msg string var succeededStatus corev1.ConditionStatus if isExceededResourceQuotaError(err) { succeededStatus = corev1.ConditionUnknown - reason = reasonExceededResourceQuota + reason = status.ReasonExceededResourceQuota backoff, currentlyBackingOff := c.timeoutHandler.GetBackoff(tr) if !currentlyBackingOff { go c.timeoutHandler.SetTaskRunTimer(tr, time.Until(backoff.NextAttempt)) } - msg = fmt.Sprintf("%s, reattempted %d times", getExceededResourcesMessage(tr), backoff.NumAttempts) + 
msg = fmt.Sprintf("%s, reattempted %d times", status.GetExceededResourcesMessage(tr), backoff.NumAttempts) } else { succeededStatus = corev1.ConditionFalse - reason = reasonCouldntGetTask + reason = status.ReasonCouldntGetTask if tr.Spec.TaskRef != nil { msg = fmt.Sprintf("Missing or invalid Task %s/%s", tr.Namespace, tr.Spec.TaskRef.Name) } else { @@ -455,51 +382,6 @@ func updateTaskRunResourceResult(taskRun *v1alpha1.TaskRun, pod *corev1.Pod, res } } -func getWaitingMessage(pod *corev1.Pod) string { - // First, try to surface reason for pending/unknown about the actual build step. - for _, status := range pod.Status.ContainerStatuses { - wait := status.State.Waiting - if wait != nil && wait.Message != "" { - return fmt.Sprintf("build step %q is pending with reason %q", - status.Name, wait.Message) - } - } - // Try to surface underlying reason by inspecting pod's recent status if condition is not true - for i, podStatus := range pod.Status.Conditions { - if podStatus.Status != corev1.ConditionTrue { - return fmt.Sprintf("pod status %q:%q; message: %q", - pod.Status.Conditions[i].Type, - pod.Status.Conditions[i].Status, - pod.Status.Conditions[i].Message) - } - } - // Next, return the Pod's status message if it has one. - if pod.Status.Message != "" { - return pod.Status.Message - } - - // Lastly fall back on a generic pending message. - return "Pending" -} - -func getFailureMessage(pod *corev1.Pod) string { - // First, try to surface an error about the actual build step that failed. - for _, status := range pod.Status.ContainerStatuses { - term := status.State.Terminated - if term != nil && term.ExitCode != 0 { - return fmt.Sprintf("%q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s", - status.Name, term.ExitCode, status.ImageID, - pod.Namespace, pod.Name, status.Name) - } - } - // Next, return the Pod's status message if it has one. 
- if pod.Status.Message != "" { - return pod.Status.Message - } - // Lastly fall back on a generic error message. - return "build failed for unspecified reasons." -} - func (c *Reconciler) updateStatus(taskrun *v1alpha1.TaskRun) (*v1alpha1.TaskRun, error) { newtaskrun, err := c.taskRunLister.TaskRuns(taskrun.Namespace).Get(taskrun.Name) if err != nil { @@ -525,6 +407,23 @@ func (c *Reconciler) updateLabelsAndAnnotations(tr *v1alpha1.TaskRun) (*v1alpha1 return newTr, nil } +// updateReady updates a Pod to include the "ready" annotation, which will be projected by +// the Downward API into a volume mounted by the entrypoint container. This will signal to +// the entrypoint that the TaskRun can proceed. +func (c *Reconciler) updateReady(pod *corev1.Pod) error { + newPod, err := c.KubeClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + if err != nil { + return xerrors.Errorf("Error getting Pod %q when updating ready annotation: %w", pod.Name, err) + } + updatePod := c.KubeClientSet.CoreV1().Pods(newPod.Namespace).Update + if err := resources.AddReadyAnnotation(newPod, updatePod); err != nil { + c.Logger.Errorf("Failed to update ready annotation for pod %q for taskrun %q: %v", pod.Name, pod.Name, err) + return xerrors.Errorf("Error adding ready annotation to Pod %q: %w", pod.Name, err) + } + + return nil +} + // createPod creates a Pod based on the Task's configuration, with pvcName as a volumeMount // TODO(dibyom): Refactor resource setup/templating logic to its own function in the resources package func (c *Reconciler) createPod(tr *v1alpha1.TaskRun, rtr *resources.ResolvedTaskResources) (*corev1.Pod, error) { @@ -609,6 +508,23 @@ func createRedirectedTaskSpec(kubeclient kubernetes.Interface, ts *v1alpha1.Task EmptyDir: &corev1.EmptyDirVolumeSource{}, }, }) + + ts.Volumes = append(ts.Volumes, corev1.Volume{ + Name: entrypoint.DownwardMountName, + VolumeSource: corev1.VolumeSource{ + DownwardAPI: &corev1.DownwardAPIVolumeSource{ + Items: 
[]corev1.DownwardAPIVolumeFile{ + corev1.DownwardAPIVolumeFile{ + Path: entrypoint.DownwardMountReadyFile, + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: fmt.Sprintf("metadata.annotations['%s']", resources.ReadyAnnotation), + }, + }, + }, + }, + }, + }) + return ts, nil } @@ -639,7 +555,7 @@ func (c *Reconciler) checkTimeout(tr *v1alpha1.TaskRun, ts *v1alpha1.TaskSpec, d tr.Status.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, - Reason: reasonTimedOut, + Reason: status.ReasonTimedOut, Message: timeoutMsg, }) // update tr completed time @@ -650,23 +566,10 @@ func (c *Reconciler) checkTimeout(tr *v1alpha1.TaskRun, ts *v1alpha1.TaskSpec, d return false, nil } -func isPodExceedingNodeResources(pod *corev1.Pod) bool { - for _, podStatus := range pod.Status.Conditions { - if podStatus.Reason == corev1.PodReasonUnschedulable && strings.Contains(podStatus.Message, "Insufficient") { - return true - } - } - return false -} - func isExceededResourceQuotaError(err error) bool { return err != nil && errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") } -func getExceededResourcesMessage(tr *v1alpha1.TaskRun) string { - return fmt.Sprintf("TaskRun pod %q exceeded available resources", tr.Name) -} - // resourceImplBinding maps pipeline resource names to the actual resource type implementations func resourceImplBinding(resources map[string]*v1alpha1.PipelineResource) (map[string]v1alpha1.PipelineResourceInterface, error) { p := make(map[string]v1alpha1.PipelineResourceInterface) diff --git a/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go b/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go index 078e22247c9..705a0e476a3 100644 --- a/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go +++ b/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go @@ -24,16 +24,14 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/knative/pkg/apis" - duckv1beta1 
"github.com/knative/pkg/apis/duck/v1beta1" "github.com/knative/pkg/configmap" "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - fakeclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" - informers "github.com/tektoncd/pipeline/pkg/client/informers/externalversions" "github.com/tektoncd/pipeline/pkg/logging" "github.com/tektoncd/pipeline/pkg/reconciler" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/entrypoint" "github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources" + "github.com/tektoncd/pipeline/pkg/status" "github.com/tektoncd/pipeline/pkg/system" "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" @@ -63,7 +61,6 @@ const ( var ( entrypointCache *entrypoint.Cache ignoreLastTransitionTime = cmpopts.IgnoreTypes(apis.Condition{}.LastTransitionTime.Inner.Time) - ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { return true }) // Pods are created with a random 3-byte (6 hex character) suffix that we want to ignore in our diffs. 
ignoreRandomPodNameSuffix = cmp.FilterPath(func(path cmp.Path) bool { return path.GoString() == "{v1.ObjectMeta}.Name" @@ -74,8 +71,8 @@ var ( return x.Cmp(y) == 0 }) - simpleStep = tb.Step("simple-step", "foo", tb.Command("/mycmd")) - simpleTask = tb.Task("test-task", "foo", tb.TaskSpec(simpleStep)) + simpleStep = tb.Step("simple-step", "foo", tb.Command("/mycmd")) + simpleTask = tb.Task("test-task", "foo", tb.TaskSpec(simpleStep)) taskMultipleSteps = tb.Task("test-task-multi-steps", "foo", tb.TaskSpec( tb.Step("z-step", "foo", tb.Command("/mycmd"), @@ -162,6 +159,21 @@ var ( EmptyDir: &corev1.EmptyDirVolumeSource{}, }, } + downward = corev1.Volume{ + Name: "downward", + VolumeSource: corev1.VolumeSource{ + DownwardAPI: &corev1.DownwardAPIVolumeSource{ + Items: []corev1.DownwardAPIVolumeFile{ + { + Path: "ready", + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['tekton.dev/ready']", + }, + }, + }, + }, + }, + } getCredentialsInitContainer = func(suffix string, ops ...tb.ContainerOp) tb.PodSpecOp { actualOps := []tb.ContainerOp{ @@ -386,22 +398,23 @@ func TestReconcile(t *testing.T) { name: "success", taskRun: taskRunSuccess, wantPod: tb.Pod("test-taskrun-run-success-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskNameLabelKey, "test-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-run-success"), tb.PodOwnerReference("TaskRun", "test-taskrun-run-success", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("9l9zj"), getPlaceToolsInitContainer(), tb.PodContainer("step-simple-step", "foo", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/mycmd", "--"), + tb.Args("-wait_file", 
"/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/mycmd", "--"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -410,34 +423,30 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/0", "-post_file", "/builder/tools/1", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "serviceaccount", taskRun: taskRunWithSaSuccess, wantPod: tb.Pod("test-taskrun-with-sa-run-success-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskNameLabelKey, "test-with-sa"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-sa-run-success"), tb.PodOwnerReference("TaskRun", "test-taskrun-with-sa-run-success", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( tb.PodServiceAccountName("test-sa"), - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("9l9zj"), getPlaceToolsInitContainer(), tb.PodContainer("step-sa-step", "foo", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/mycmd", "--"), + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/mycmd", "--"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), 
tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -446,18 +455,13 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/0", "-post_file", "/builder/tools/1", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "params", taskRun: taskRunTemplating, wantPod: tb.Pod("test-taskrun-templating-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskNameLabelKey, "test-task-with-templating"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-templating"), tb.PodOwnerReference("TaskRun", "test-taskrun-templating", @@ -472,17 +476,18 @@ func TestReconcile(t *testing.T) { }, }, }, - }, toolsVolume, workspaceVolume, homeVolume), + }, toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("78c5n"), getPlaceToolsInitContainer(), tb.PodContainer("step-git-source-git-resource-mssqb", "override-with-git:latest", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/ko-app/git-init", "--", + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/ko-app/git-init", "--", "-url", "https://foo.git", "-revision", "master", "-path", "/workspace/workspace"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -552,18 +557,13 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", 
"override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/4", "-post_file", "/builder/tools/5", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "wrap-steps", taskRun: taskRunInputOutput, wantPod: tb.Pod("test-taskrun-input-output-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskNameLabelKey, "test-output-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-input-output"), tb.PodOwnerReference("TaskRun", "test-taskrun-input-output", @@ -577,17 +577,18 @@ func TestReconcile(t *testing.T) { ReadOnly: false, }, }, - }, toolsVolume, workspaceVolume, homeVolume), + }, toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("vr6ds"), getPlaceToolsInitContainer(), tb.PodContainer("step-create-dir-another-git-resource-78c5n", "override-with-bash-noop:latest", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/ko-app/bash", "--", + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/ko-app/bash", "--", "-args", "mkdir -p /workspace/another-git-resource"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -689,33 +690,29 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/6", "-post_file", "/builder/tools/7", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, 
entrypoint.MountPoint), - ), ), ), }, { name: "taskrun-with-taskspec", taskRun: taskRunWithTaskSpec, wantPod: tb.Pod("test-taskrun-with-taskspec-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-taskspec"), tb.PodOwnerReference("TaskRun", "test-taskrun-with-taskspec", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("mz4c7"), getPlaceToolsInitContainer(), tb.PodContainer("step-git-source-git-resource-9l9zj", "override-with-git:latest", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/ko-app/git-init", "--", + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/ko-app/git-init", "--", "-url", "https://foo.git", "-revision", "master", "-path", "/workspace/workspace"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -739,33 +736,29 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/1", "-post_file", "/builder/tools/2", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "success-with-cluster-task", taskRun: taskRunWithClusterTask, wantPod: tb.Pod("test-taskrun-with-cluster-task-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + 
tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskNameLabelKey, "test-cluster-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-cluster-task"), tb.PodOwnerReference("TaskRun", "test-taskrun-with-cluster-task", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("9l9zj"), getPlaceToolsInitContainer(), tb.PodContainer("step-simple-step", "foo", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/mycmd", "--"), + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/mycmd", "--"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -774,34 +767,30 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/0", "-post_file", "/builder/tools/1", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "taskrun-with-resource-spec-task-spec", taskRun: taskRunWithResourceSpecAndTaskSpec, wantPod: tb.Pod("test-taskrun-with-resource-spec-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-resource-spec"), tb.PodOwnerReference("TaskRun", "test-taskrun-with-resource-spec", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, 
homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("mz4c7"), getPlaceToolsInitContainer(), tb.PodContainer("step-git-source-workspace-9l9zj", "override-with-git:latest", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/ko-app/git-init", "--", + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/ko-app/git-init", "--", "-url", "github.com/foo/bar.git", "-revision", "rel-can", "-path", "/workspace/workspace"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -824,34 +813,30 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/1", "-post_file", "/builder/tools/2", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "taskrun-with-labels", taskRun: taskRunWithLabels, wantPod: tb.Pod("test-taskrun-with-labels-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel("TaskRunLabel", "TaskRunValue"), tb.PodLabel(taskNameLabelKey, "test-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-labels"), tb.PodOwnerReference("TaskRun", "test-taskrun-with-labels", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), 
getCredentialsInitContainer("9l9zj"), getPlaceToolsInitContainer(), tb.PodContainer("step-simple-step", "foo", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/mycmd", "--"), + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/mycmd", "--"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -860,34 +845,30 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/0", "-post_file", "/builder/tools/1", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "taskrun-with-annotations", taskRun: taskRunWithAnnotations, wantPod: tb.Pod("test-taskrun-with-annotations-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodAnnotation("TaskRunAnnotation", "TaskRunValue"), tb.PodLabel(taskNameLabelKey, "test-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-annotations"), tb.PodOwnerReference("TaskRun", "test-taskrun-with-annotations", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("9l9zj"), getPlaceToolsInitContainer(), tb.PodContainer("step-simple-step", "foo", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/mycmd", "--"), + tb.Args("-wait_file", 
"/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/mycmd", "--"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -896,30 +877,25 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/0", "-post_file", "/builder/tools/1", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "task-env", taskRun: taskRunTaskEnv, wantPod: tb.Pod("test-taskrun-task-env-pod-311bc9", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskNameLabelKey, "test-task-env"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-task-env"), tb.PodOwnerReference("TaskRun", "test-taskrun-task-env", tb.OwnerReferenceAPIVersion("tekton.dev/v1alpha1")), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("9l9zj", tb.EnvVar("FRUIT", "APPLE")), getPlaceToolsInitContainer(tb.EnvVar("FRUIT", "APPLE")), tb.PodContainer("step-env-step", "foo", tb.Command(entrypointLocation), tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/mycmd", "--"), + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/mycmd", "--"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.Resources(tb.Requests( @@ -928,38 +904,34 @@ func TestReconcile(t *testing.T) { 
tb.EphemeralStorage("0"), )), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.EnvVar("ANOTHER", "VARIABLE"), tb.EnvVar("FRUIT", "LEMON"), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/0", "-post_file", "/builder/tools/1", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - tb.EnvVar("FRUIT", "APPLE"), - ), ), ), }, { name: "taskrun-with-resource-requests", taskRun: taskRunWithResourceRequests, wantPod: tb.Pod("test-taskrun-with-resource-requests-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-resource-requests"), tb.PodOwnerReference("TaskRun", "test-taskrun-with-resource-requests", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("9l9zj"), getPlaceToolsInitContainer(), tb.PodContainer("step-step1", "foo", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/mycmd", "--"), + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/mycmd", "--"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources( @@ -1005,33 +977,29 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - 
tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/2", "-post_file", "/builder/tools/3", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }, { name: "taskrun-with-pod", taskRun: taskRunWithPod, wantPod: tb.Pod("test-taskrun-with-pod-pod-123456", "foo", - tb.PodAnnotation("sidecar.istio.io/inject", "false"), + tb.PodAnnotation("tekton.dev/ready", ""), tb.PodLabel(taskNameLabelKey, "test-task"), tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-pod"), tb.PodOwnerReference("TaskRun", "test-taskrun-with-pod", tb.OwnerReferenceAPIVersion(currentApiVersion)), tb.PodSpec( - tb.PodVolumes(toolsVolume, workspaceVolume, homeVolume), + tb.PodVolumes(toolsVolume, downward, workspaceVolume, homeVolume), tb.PodRestartPolicy(corev1.RestartPolicyNever), getCredentialsInitContainer("9l9zj"), getPlaceToolsInitContainer(), tb.PodContainer("step-simple-step", "foo", tb.Command(entrypointLocation), - tb.Args("-wait_file", "", "-post_file", "/builder/tools/0", "-entrypoint", "/mycmd", "--"), + tb.Args("-wait_file", "/builder/downward/ready", "-post_file", "/builder/tools/0", "-wait_file_content", "-entrypoint", "/mycmd", "--"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/builder/home"), tb.VolumeMount("tools", "/builder/tools"), + tb.VolumeMount("downward", "/builder/downward"), tb.VolumeMount("workspace", workspaceDir), tb.VolumeMount("home", "/builder/home"), tb.Resources(tb.Requests( @@ -1040,11 +1008,6 @@ func TestReconcile(t *testing.T) { tb.EphemeralStorage("0"), )), ), - tb.PodContainer("nop", "override-with-nop:latest", - tb.Command("/builder/tools/entrypoint"), - tb.Args("-wait_file", "/builder/tools/0", "-post_file", "/builder/tools/1", "-entrypoint", "/ko-app/nop", "--"), - tb.VolumeMount(entrypoint.MountName, entrypoint.MountPoint), - ), ), ), }} { @@ -1089,8 +1052,8 @@ func TestReconcile(t *testing.T) { if condition == nil || condition.Status != corev1.ConditionUnknown { 
t.Errorf("Expected invalid TaskRun to have in progress status, but had %v", condition) } - if condition != nil && condition.Reason != reasonRunning { - t.Errorf("Expected reason %q but was %s", reasonRunning, condition.Reason) + if condition != nil && condition.Reason != status.ReasonRunning { + t.Errorf("Expected reason %q but was %s", status.ReasonRunning, condition.Reason) } if tr.Status.PodName == "" { @@ -1135,7 +1098,7 @@ func TestReconcile_SetsStartTime(t *testing.T) { } } -func TestReconcile_SortTaskRunStatusSteps(t *testing.T) { +func TestReconcile_SortTaskRunStatusSteps(t *testing.T) { taskRun := tb.TaskRun("test-taskrun", "foo", tb.TaskRunSpec( tb.TaskRunTaskRef(taskMultipleSteps.Name)), tb.TaskRunStatus( @@ -1197,11 +1160,11 @@ func TestReconcile_SortTaskRunStatusSteps(t *testing.T) { func verify_TaskRunStatusStep(t *testing.T, taskRun *v1alpha1.TaskRun, task *v1alpha1.Task) { actualStepOrder := []string{} - for _, state := range(taskRun.Status.Steps) { + for _, state := range taskRun.Status.Steps { actualStepOrder = append(actualStepOrder, state.Name) } expectedStepOrder := []string{} - for _, state := range(taskMultipleSteps.Spec.Steps) { + for _, state := range taskMultipleSteps.Spec.Steps { expectedStepOrder = append(expectedStepOrder, state.Name) } // Add a nop in the end. This may be removed in future. 
@@ -1262,12 +1225,12 @@ func TestReconcileInvalidTaskRuns(t *testing.T) { { name: "task run with no task", taskRun: noTaskRun, - reason: reasonFailedResolution, + reason: status.ReasonFailedResolution, }, { name: "task run with no task", taskRun: withWrongRef, - reason: reasonFailedResolution, + reason: status.ReasonFailedResolution, }, } @@ -1326,9 +1289,7 @@ func TestReconcilePodFetchError(t *testing.T) { } } -func TestReconcilePodUpdateStatus(t *testing.T) { - taskRun := tb.TaskRun("test-taskrun-run-success", "foo", tb.TaskRunSpec(tb.TaskRunTaskRef("test-task"))) - +func makePod(taskRun *v1alpha1.TaskRun, task *v1alpha1.Task) (*corev1.Pod, error) { logger, _ := logging.NewLogger("", "") cache, _ := entrypoint.NewCache() // TODO(jasonhall): This avoids a circular dependency where @@ -1338,12 +1299,18 @@ func TestReconcilePodUpdateStatus(t *testing.T) { // specify the Pod we want to exist directly, and not call MakePod from // the build. This will break the cycle and allow us to simply use // clients normally. 
- pod, err := resources.MakePod(taskRun, simpleTask.Spec, fakekubeclientset.NewSimpleClientset(&corev1.ServiceAccount{ + return resources.MakePod(taskRun, task.Spec, fakekubeclientset.NewSimpleClientset(&corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: taskRun.Namespace, }, }), cache, logger) +} + +func TestReconcilePodUpdateStatus(t *testing.T) { + taskRun := tb.TaskRun("test-taskrun-run-success", "foo", tb.TaskRunSpec(tb.TaskRunTaskRef("test-task"))) + + pod, err := makePod(taskRun, simpleTask) if err != nil { t.Fatalf("MakePod: %v", err) } @@ -1410,7 +1377,7 @@ func TestCreateRedirectedTaskSpec(t *testing.T) { )) expectedSteps := len(task.Spec.Steps) + 1 - expectedVolumes := len(task.Spec.Volumes) + 1 + expectedVolumes := len(task.Spec.Volumes) + 2 observer, _ := observer.New(zap.InfoLevel) entrypointCache, _ := entrypoint.NewCache() @@ -1563,375 +1530,6 @@ func TestReconcileTimeouts(t *testing.T) { } } -func TestUpdateStatusFromPod(t *testing.T) { - conditionRunning := apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: reasonRunning, - Message: reasonRunning, - } - conditionTrue := apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionTrue, - } - conditionBuilding := apis.Condition{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: reasonRunning, - } - for _, c := range []struct { - desc string - podStatus corev1.PodStatus - want v1alpha1.TaskRunStatus - }{{ - desc: "empty", - podStatus: corev1.PodStatus{}, - - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{conditionRunning}, - }, - Steps: []v1alpha1.StepState{}, - }, - }, { - desc: "ignore-creds-init", - podStatus: corev1.PodStatus{ - InitContainerStatuses: []corev1.ContainerStatus{{ - // creds-init; ignored - }}, - ContainerStatuses: []corev1.ContainerStatus{{ - Name: "state-name", - State: corev1.ContainerState{ - Terminated: 
&corev1.ContainerStateTerminated{ - ExitCode: 123, - }, - }, - }}, - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{conditionRunning}, - }, - Steps: []v1alpha1.StepState{{ - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 123, - }}, - Name: "state-name", - }}, - }, - }, { - desc: "ignore-init-containers", - podStatus: corev1.PodStatus{ - InitContainerStatuses: []corev1.ContainerStatus{{ - // creds-init; ignored. - }, { - // git-init; ignored. - }}, - ContainerStatuses: []corev1.ContainerStatus{{ - Name: "state-name", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 123, - }, - }, - }}, - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{conditionRunning}, - }, - Steps: []v1alpha1.StepState{{ - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 123, - }}, - Name: "state-name", - }}, - }, - }, { - desc: "success", - podStatus: corev1.PodStatus{ - Phase: corev1.PodSucceeded, - ContainerStatuses: []corev1.ContainerStatus{{ - Name: "step-step-push", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }}, - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{conditionTrue}, - }, - Steps: []v1alpha1.StepState{{ - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - }}, - Name: "step-push", - }}, - // We don't actually care about the time, just that it's not nil - CompletionTime: &metav1.Time{Time: time.Now()}, - }, - }, { - desc: "running", - podStatus: corev1.PodStatus{ - Phase: corev1.PodRunning, - ContainerStatuses: []corev1.ContainerStatus{{ - Name: "step-running-step", - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - }}, - }, - want: 
v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{conditionBuilding}, - }, - Steps: []v1alpha1.StepState{{ - ContainerState: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - Name: "running-step", - }}, - }, - }, { - desc: "failure-terminated", - podStatus: corev1.PodStatus{ - Phase: corev1.PodFailed, - InitContainerStatuses: []corev1.ContainerStatus{{ - // creds-init status; ignored - }}, - ContainerStatuses: []corev1.ContainerStatus{{ - Name: "step-failure", - ImageID: "image-id", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 123, - }, - }, - }}, - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionFalse, - Message: `"step-failure" exited with code 123 (image: "image-id"); for logs run: kubectl -n foo logs pod -c step-failure`, - }}, - }, - Steps: []v1alpha1.StepState{{ - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 123, - }}, - Name: "failure", - }}, - // We don't actually care about the time, just that it's not nil - CompletionTime: &metav1.Time{Time: time.Now()}, - }, - }, { - desc: "failure-message", - podStatus: corev1.PodStatus{ - Phase: corev1.PodFailed, - Message: "boom", - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionFalse, - Message: "boom", - }}, - }, - Steps: []v1alpha1.StepState{}, - // We don't actually care about the time, just that it's not nil - CompletionTime: &metav1.Time{Time: time.Now()}, - }, - }, { - desc: "failure-unspecified", - podStatus: corev1.PodStatus{Phase: corev1.PodFailed}, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionFalse, - Message: "build failed for 
unspecified reasons.", - }}, - }, - Steps: []v1alpha1.StepState{}, - // We don't actually care about the time, just that it's not nil - CompletionTime: &metav1.Time{Time: time.Now()}, - }, - }, { - desc: "pending-waiting-message", - podStatus: corev1.PodStatus{ - Phase: corev1.PodPending, - InitContainerStatuses: []corev1.ContainerStatus{{ - // creds-init status; ignored - }}, - ContainerStatuses: []corev1.ContainerStatus{{ - Name: "status-name", - State: corev1.ContainerState{ - Waiting: &corev1.ContainerStateWaiting{ - Message: "i'm pending", - }, - }, - }}, - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: `build step "status-name" is pending with reason "i'm pending"`, - }}, - }, - Steps: []v1alpha1.StepState{{ - ContainerState: corev1.ContainerState{ - Waiting: &corev1.ContainerStateWaiting{ - Message: "i'm pending", - }, - }, - Name: "status-name", - }}, - }, - }, { - desc: "pending-pod-condition", - podStatus: corev1.PodStatus{ - Phase: corev1.PodPending, - Conditions: []corev1.PodCondition{{ - Status: corev1.ConditionUnknown, - Type: "the type", - Message: "the message", - }}, - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: `pod status "the type":"Unknown"; message: "the message"`, - }}, - }, - Steps: []v1alpha1.StepState{}, - }, - }, { - desc: "pending-message", - podStatus: corev1.PodStatus{ - Phase: corev1.PodPending, - Message: "pod status message", - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: "pod status message", - }}, - }, - Steps: []v1alpha1.StepState{}, - }, - }, { - desc: "pending-no-message", - 
podStatus: corev1.PodStatus{Phase: corev1.PodPending}, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: "Pending", - }}, - }, - Steps: []v1alpha1.StepState{}, - }, - }, { - desc: "pending-not-enough-node-resources", - podStatus: corev1.PodStatus{ - Phase: corev1.PodPending, - Conditions: []corev1.PodCondition{{ - Reason: corev1.PodReasonUnschedulable, - Message: "0/1 nodes are available: 1 Insufficient cpu.", - }}, - }, - want: v1alpha1.TaskRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: reasonExceededNodeResources, - Message: `TaskRun pod "taskRun" exceeded available resources`, - }}, - }, - Steps: []v1alpha1.StepState{}, - }, - }} { - t.Run(c.desc, func(t *testing.T) { - observer, _ := observer.New(zap.InfoLevel) - logger := zap.New(observer).Sugar() - fakeClient := fakeclientset.NewSimpleClientset() - sharedInfomer := informers.NewSharedInformerFactory(fakeClient, 0) - pipelineResourceInformer := sharedInfomer.Tekton().V1alpha1().PipelineResources() - resourceLister := pipelineResourceInformer.Lister() - fakekubeclient := fakekubeclientset.NewSimpleClientset() - - rs := []*v1alpha1.PipelineResource{{ - ObjectMeta: metav1.ObjectMeta{ - Name: "source-image", - Namespace: "marshmallow", - }, - Spec: v1alpha1.PipelineResourceSpec{ - Type: "image", - }, - }} - - for _, r := range rs { - err := pipelineResourceInformer.Informer().GetIndexer().Add(r) - if err != nil { - t.Errorf("pipelineResourceInformer.Informer().GetIndexer().Add(r) failed with err: %s", err) - } - } - - now := metav1.Now() - p := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", - Namespace: "foo", - CreationTimestamp: now, - }, - Status: c.podStatus, - } - startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) - tr := tb.TaskRun("taskRun", 
"foo", tb.TaskRunStatus(tb.TaskRunStartTime(startTime))) - updateStatusFromPod(tr, p, resourceLister, fakekubeclient, logger) - - // Common traits, set for test case brevity. - c.want.PodName = "pod" - c.want.StartTime = &metav1.Time{Time: startTime} - - ensureTimeNotNil := cmp.Comparer(func(x, y *metav1.Time) bool { - if x == nil { - return y == nil - } - return y != nil - }) - if d := cmp.Diff(tr.Status, c.want, ignoreVolatileTime, ensureTimeNotNil); d != "" { - t.Errorf("Diff:\n%s", d) - } - if tr.Status.StartTime.Time != c.want.StartTime.Time { - t.Errorf("Expected TaskRun startTime to be unchanged but was %s", tr.Status.StartTime) - } - }) - } -} - func TestHandlePodCreationError(t *testing.T) { taskRun := tb.TaskRun("test-taskrun-pod-creation-failed", "foo", tb.TaskRunSpec( tb.TaskRunTaskRef(simpleTask.Name), @@ -1968,14 +1566,14 @@ func TestHandlePodCreationError(t *testing.T) { err: k8sapierrors.NewForbidden(k8sruntimeschema.GroupResource{Group: "foo", Resource: "bar"}, "baz", errors.New("exceeded quota")), expectedType: apis.ConditionSucceeded, expectedStatus: corev1.ConditionUnknown, - expectedReason: reasonExceededResourceQuota, + expectedReason: status.ReasonExceededResourceQuota, }, { description: "errors other than exceeded quota fail the taskrun", err: errors.New("this is a fatal error"), expectedType: apis.ConditionSucceeded, expectedStatus: corev1.ConditionFalse, - expectedReason: reasonCouldntGetTask, + expectedReason: status.ReasonCouldntGetTask, }, } for _, tc := range testcases { diff --git a/pkg/status/taskrunpod.go b/pkg/status/taskrunpod.go new file mode 100644 index 00000000000..f698d2a399a --- /dev/null +++ b/pkg/status/taskrunpod.go @@ -0,0 +1,192 @@ +package status + +import ( + "fmt" + "strings" + "time" + + "github.com/knative/pkg/apis" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" + 
"github.com/tektoncd/pipeline/pkg/reconciler/v1alpha1/taskrun/resources" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// UpdateStatusFromPod modifies the task run status based on the pod and then returns true if the pod is running and +// all sidecars are ready +func UpdateStatusFromPod(taskRun *v1alpha1.TaskRun, pod *corev1.Pod, resourceLister listers.PipelineResourceLister, kubeclient kubernetes.Interface, logger *zap.SugaredLogger) bool { + if taskRun.Status.GetCondition(apis.ConditionSucceeded) == nil || taskRun.Status.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { + // If the taskRunStatus doesn't exist yet, it's because we just started running + taskRun.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: ReasonRunning, + Message: ReasonRunning, + }) + } + + taskRun.Status.PodName = pod.Name + + taskRun.Status.Steps = []v1alpha1.StepState{} + for _, s := range pod.Status.ContainerStatuses { + if resources.IsContainerStep(s.Name) { + taskRun.Status.Steps = append(taskRun.Status.Steps, v1alpha1.StepState{ + ContainerState: *s.State.DeepCopy(), + Name: resources.TrimContainerNamePrefix(s.Name), + }) + } + } + + // Complete if we did not find a step that is not complete, or the pod is in a definitely complete phase + complete := areStepsComplete(pod) || pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed + + if complete { + updateCompletedTaskRun(taskRun, pod) + } else { + updateIncompleteTaskRun(taskRun, pod) + } + + sidecarsCount, readySidecarsCount := countSidecars(pod) + return pod.Status.Phase == corev1.PodRunning && readySidecarsCount == sidecarsCount +} + +func updateCompletedTaskRun(taskRun *v1alpha1.TaskRun, pod *corev1.Pod) { + if didTaskRunFail(pod) { + msg := getFailureMessage(pod) + taskRun.Status.SetCondition(&apis.Condition{ + Type: 
apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Message: msg, + }) + } else { + taskRun.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + }) + } + // update tr completed time + taskRun.Status.CompletionTime = &metav1.Time{Time: time.Now()} +} + +func updateIncompleteTaskRun(taskRun *v1alpha1.TaskRun, pod *corev1.Pod) { + switch pod.Status.Phase { + case corev1.PodRunning: + taskRun.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: ReasonBuilding, + }) + case corev1.PodPending: + var reason, msg string + if IsPodExceedingNodeResources(pod) { + reason = ReasonExceededNodeResources + msg = GetExceededResourcesMessage(taskRun) + } else { + reason = "Pending" + msg = GetWaitingMessage(pod) + } + taskRun.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: reason, + Message: msg, + }) + } +} + +func didTaskRunFail(pod *corev1.Pod) bool { + f := pod.Status.Phase == corev1.PodFailed + for _, s := range pod.Status.ContainerStatuses { + if resources.IsContainerStep(s.Name) { + if s.State.Terminated != nil { + f = f || s.State.Terminated.ExitCode != 0 + } + } + } + return f +} + +func areStepsComplete(pod *corev1.Pod) bool { + stepsComplete := len(pod.Status.ContainerStatuses) > 0 && pod.Status.Phase == corev1.PodRunning + for _, s := range pod.Status.ContainerStatuses { + if resources.IsContainerStep(s.Name) { + if s.State.Terminated == nil { + stepsComplete = false + } + } + } + return stepsComplete +} + +func countSidecars(pod *corev1.Pod) (total int, ready int) { + for _, s := range pod.Status.ContainerStatuses { + if !resources.IsContainerStep(s.Name) { + if s.State.Running != nil && s.Ready { + ready++ + } + total++ + } + } + return total, ready +} + +func getFailureMessage(pod *corev1.Pod) string { + // First, try to surface an error about the actual build step that failed. 
+ for _, status := range pod.Status.ContainerStatuses { + term := status.State.Terminated + if term != nil && term.ExitCode != 0 { + return fmt.Sprintf("%q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s", + status.Name, term.ExitCode, status.ImageID, + pod.Namespace, pod.Name, status.Name) + } + } + // Next, return the Pod's status message if it has one. + if pod.Status.Message != "" { + return pod.Status.Message + } + // Lastly fall back on a generic error message. + return "build failed for unspecified reasons." +} + +func IsPodExceedingNodeResources(pod *corev1.Pod) bool { + for _, podStatus := range pod.Status.Conditions { + if podStatus.Reason == corev1.PodReasonUnschedulable && strings.Contains(podStatus.Message, "Insufficient") { + return true + } + } + return false +} + +func GetExceededResourcesMessage(tr *v1alpha1.TaskRun) string { + return fmt.Sprintf("TaskRun pod %q exceeded available resources", tr.Name) +} + +func GetWaitingMessage(pod *corev1.Pod) string { + // First, try to surface reason for pending/unknown about the actual build step. + for _, status := range pod.Status.ContainerStatuses { + wait := status.State.Waiting + if wait != nil && wait.Message != "" { + return fmt.Sprintf("build step %q is pending with reason %q", + status.Name, wait.Message) + } + } + // Try to surface underlying reason by inspecting pod's recent status if condition is not true + for i, podStatus := range pod.Status.Conditions { + if podStatus.Status != corev1.ConditionTrue { + return fmt.Sprintf("pod status %q:%q; message: %q", + pod.Status.Conditions[i].Type, + pod.Status.Conditions[i].Status, + pod.Status.Conditions[i].Message) + } + } + // Next, return the Pod's status message if it has one. + if pod.Status.Message != "" { + return pod.Status.Message + } + + // Lastly fall back on a generic pending message. 
+ return "Pending" +} diff --git a/pkg/status/taskrunpod_test.go b/pkg/status/taskrunpod_test.go new file mode 100644 index 00000000000..031a0bad2a5 --- /dev/null +++ b/pkg/status/taskrunpod_test.go @@ -0,0 +1,422 @@ +package status + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/knative/pkg/apis" + duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + fakeclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" + informers "github.com/tektoncd/pipeline/pkg/client/informers/externalversions" + tb "github.com/tektoncd/pipeline/test/builder" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" +) + +var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { return true }) + +func TestUpdateStatusFromPod(t *testing.T) { + conditionRunning := apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: ReasonRunning, + Message: ReasonRunning, + } + conditionTrue := apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + } + conditionBuilding := apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: ReasonBuilding, + } + for _, c := range []struct { + desc string + podStatus corev1.PodStatus + want v1alpha1.TaskRunStatus + }{{ + desc: "empty", + podStatus: corev1.PodStatus{}, + + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionRunning}, + }, + Steps: []v1alpha1.StepState{}, + }, + }, { + desc: "ignore-creds-init", + podStatus: corev1.PodStatus{ + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init; ignored + }}, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-state-name", + State: corev1.ContainerState{ + Terminated: 
&corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }, + }}, + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionRunning}, + }, + Steps: []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }}, + Name: "state-name", + }}, + }, + }, { + desc: "ignore-init-containers", + podStatus: corev1.PodStatus{ + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init; ignored. + }, { + // git-init; ignored. + }}, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-state-name", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }, + }}, + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionRunning}, + }, + Steps: []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }}, + Name: "state-name", + }}, + }, + }, { + desc: "success", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-step-push", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + }}, + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionTrue}, + }, + Steps: []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + }}, + Name: "step-push", + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, { + desc: "running", + podStatus: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-running-step", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }}, + }, + want: 
v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionBuilding}, + }, + Steps: []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + Name: "running-step", + }}, + }, + }, { + desc: "failure-terminated", + podStatus: corev1.PodStatus{ + Phase: corev1.PodFailed, + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init status; ignored + }}, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-failure", + ImageID: "image-id", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }, + }}, + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Message: `"step-failure" exited with code 123 (image: "image-id"); for logs run: kubectl -n foo logs pod -c step-failure`, + }}, + }, + Steps: []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }}, + Name: "failure", + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, { + desc: "failure-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodFailed, + Message: "boom", + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Message: "boom", + }}, + }, + Steps: []v1alpha1.StepState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, { + desc: "failure-unspecified", + podStatus: corev1.PodStatus{Phase: corev1.PodFailed}, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Message: "build failed for 
unspecified reasons.", + }}, + }, + Steps: []v1alpha1.StepState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, { + desc: "pending-waiting-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init status; ignored + }}, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-status-name", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Message: "i'm pending", + }, + }, + }}, + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: `build step "step-status-name" is pending with reason "i'm pending"`, + }}, + }, + Steps: []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Message: "i'm pending", + }, + }, + Name: "status-name", + }}, + }, + }, { + desc: "pending-pod-condition", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + Conditions: []corev1.PodCondition{{ + Status: corev1.ConditionUnknown, + Type: "the type", + Message: "the message", + }}, + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: `pod status "the type":"Unknown"; message: "the message"`, + }}, + }, + Steps: []v1alpha1.StepState{}, + }, + }, { + desc: "pending-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + Message: "pod status message", + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: "pod status message", + }}, + }, + Steps: []v1alpha1.StepState{}, + }, + }, { + desc: 
"pending-no-message", + podStatus: corev1.PodStatus{Phase: corev1.PodPending}, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: "Pending", + }}, + }, + Steps: []v1alpha1.StepState{}, + }, + }, { + desc: "pending-not-enough-node-resources", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + Conditions: []corev1.PodCondition{{ + Reason: corev1.PodReasonUnschedulable, + Message: "0/1 nodes are available: 1 Insufficient cpu.", + }}, + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: ReasonExceededNodeResources, + Message: `TaskRun pod "taskRun" exceeded available resources`, + }}, + }, + Steps: []v1alpha1.StepState{}, + }, + }, { + desc: "with-running-sidecar", + podStatus: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "step-running-step", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + { + Name: "running-sidecar", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + Ready: true, + }, + }, + }, + want: v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{conditionBuilding}, + }, + Steps: []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + Name: "running-step", + }}, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + observer, _ := observer.New(zap.InfoLevel) + logger := zap.New(observer).Sugar() + fakeClient := fakeclientset.NewSimpleClientset() + sharedInfomer := informers.NewSharedInformerFactory(fakeClient, 0) + pipelineResourceInformer := sharedInfomer.Tekton().V1alpha1().PipelineResources() + resourceLister := pipelineResourceInformer.Lister() + fakekubeclient := 
fakekubeclientset.NewSimpleClientset() + + rs := []*v1alpha1.PipelineResource{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "source-image", + Namespace: "marshmallow", + }, + Spec: v1alpha1.PipelineResourceSpec{ + Type: "image", + }, + }} + + for _, r := range rs { + err := pipelineResourceInformer.Informer().GetIndexer().Add(r) + if err != nil { + t.Errorf("pipelineResourceInformer.Informer().GetIndexer().Add(r) failed with err: %s", err) + } + } + + now := metav1.Now() + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "foo", + CreationTimestamp: now, + }, + Status: c.podStatus, + } + startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) + tr := tb.TaskRun("taskRun", "foo", tb.TaskRunStatus(tb.TaskRunStartTime(startTime))) + UpdateStatusFromPod(tr, p, resourceLister, fakekubeclient, logger) + + // Common traits, set for test case brevity. + c.want.PodName = "pod" + c.want.StartTime = &metav1.Time{Time: startTime} + + ensureTimeNotNil := cmp.Comparer(func(x, y *metav1.Time) bool { + if x == nil { + return y == nil + } + return y != nil + }) + if d := cmp.Diff(c.want, tr.Status, ignoreVolatileTime, ensureTimeNotNil); d != "" { + t.Errorf("Wanted:%s %v", c.desc, c.want.Conditions[0]) + t.Errorf("Diff:\n%s", d) + } + if tr.Status.StartTime.Time != c.want.StartTime.Time { + t.Errorf("Expected TaskRun startTime to be unchanged but was %s", tr.Status.StartTime) + } + }) + } +} diff --git a/pkg/status/taskrunreasons.go b/pkg/status/taskrunreasons.go new file mode 100644 index 00000000000..f2ef9b4ce12 --- /dev/null +++ b/pkg/status/taskrunreasons.go @@ -0,0 +1,34 @@ +package status + +const ( + // reasonCouldntGetTask indicates that the reason for the failure status is that the + // Task couldn't be found + ReasonCouldntGetTask = "CouldntGetTask" + + // reasonFailedResolution indicated that the reason for failure status is + // that references within the TaskRun could not be resolved + ReasonFailedResolution = "TaskRunResolutionFailed" + + // 
ReasonFailedValidation indicates that the reason for failure status is + // that taskrun failed runtime validation + ReasonFailedValidation = "TaskRunValidationFailed" + + // ReasonRunning indicates that the reason for the in-progress status is that the TaskRun + // is just starting to be reconciled + ReasonRunning = "Running" + + // ReasonBuilding indicates that the reason for the in-progress status is that the TaskRun + // is just being built + ReasonBuilding = "Building" + + // ReasonTimedOut indicates that the TaskRun has taken longer than its configured timeout + ReasonTimedOut = "TaskRunTimeout" + + // ReasonExceededResourceQuota indicates that the TaskRun failed to create a pod due to + // a ResourceQuota in the namespace + ReasonExceededResourceQuota = "ExceededResourceQuota" + + // ReasonExceededNodeResources indicates that the TaskRun's pod has failed to start due + // to resource constraints on the node + ReasonExceededNodeResources = "ExceededNodeResources" +) diff --git a/test/kaniko_task_test.go b/test/kaniko_task_test.go index 0d0c43a9fc1..37d86d40e8d 100644 --- a/test/kaniko_task_test.go +++ b/test/kaniko_task_test.go @@ -41,7 +41,6 @@ const ( kanikoTaskName = "kanikotask" kanikoTaskRunName = "kanikotask-run" kanikoResourceName = "go-example-git" - kanikoBuildOutput = "Task completed successfully" ) func getGitResource(namespace string) *v1alpha1.PipelineResource { @@ -134,10 +133,6 @@ func TestKanikoTaskRun(t *testing.T) { if err != nil { t.Fatalf("Expected to get logs from pod %s: %v", podName, err) } - // check the logs contain our success criteria - if !strings.Contains(logs, kanikoBuildOutput) { - t.Fatalf("Expected output %s from pod %s but got %s", kanikoBuildOutput, podName, logs) - } // make sure the pushed digest matches the one we pushed re := regexp.MustCompile(`digest: (sha256:\w+)`) match := re.FindStringSubmatch(logs) diff --git a/test/taskrun_test.go b/test/taskrun_test.go index 1bc82e1cbf7..f8018a49be5 100644 --- 
a/test/taskrun_test.go +++ b/test/taskrun_test.go @@ -95,14 +95,6 @@ func TestTaskRunFailure(t *testing.T) { }, }, Name: "world", - }, { - ContainerState: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 0, - Reason: "Completed", - }, - }, - Name: "nop", }} ignoreFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreFields); d != "" {