Skip to content

K3s

Since testcontainers-go v0.21.0

Introduction

The Testcontainers module for K3s.

Adding this module to your project dependencies

Please run the following command to add the K3s module to your Go dependencies:

go get github.com/testcontainers/testcontainers-go/modules/k3s

Usage example

package k3s_test

import (
    "context"
    "fmt"
    "testing"
    "time"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kwait "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/modules/k3s"
    "github.com/testcontainers/testcontainers-go/wait"
)

// Test_LoadImages verifies that an image already present on the Docker host
// can be loaded into the k3s cluster and used by a pod without pulling, and
// that loading a non-existent image fails.
func Test_LoadImages(t *testing.T) {
    // Give up to three minutes to run this test.
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
    defer cancel()

    k3sContainer, err := k3s.RunContainer(ctx,
        testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1"),
    )
    if err != nil {
        t.Fatal(err)
    }

    // Clean up the container
    defer func() {
        if err := k3sContainer.Terminate(ctx); err != nil {
            t.Fatal(err)
        }
    }()

    kubeConfigYaml, err := k3sContainer.GetKubeConfig(ctx)
    if err != nil {
        t.Fatal(err)
    }

    restcfg, err := clientcmd.RESTConfigFromKubeConfig(kubeConfigYaml)
    if err != nil {
        t.Fatal(err)
    }

    k8s, err := kubernetes.NewForConfig(restcfg)
    if err != nil {
        t.Fatal(err)
    }

    provider, err := testcontainers.ProviderDocker.GetProvider()
    if err != nil {
        t.Fatal(err)
    }

    // Ensure the nginx image is available locally before loading it into
    // the cluster; LoadImages only transfers images present on the host.
    err = provider.PullImage(ctx, "nginx")
    if err != nil {
        t.Fatal(err)
    }

    t.Run("Test load image not available", func(t *testing.T) {
        err := k3sContainer.LoadImages(ctx, "fake.registry/fake:non-existing")
        if err == nil {
            t.Fatal("should have failed")
        }
    })

    t.Run("Test load image in cluster", func(t *testing.T) {
        err := k3sContainer.LoadImages(ctx, "nginx")
        if err != nil {
            t.Fatal(err)
        }

        // PullNever guarantees the pod can only start if LoadImages
        // actually made the image available inside the cluster.
        pod := &corev1.Pod{
            TypeMeta: metav1.TypeMeta{
                Kind:       "Pod",
                APIVersion: "v1",
            },
            ObjectMeta: metav1.ObjectMeta{
                Name: "test-pod",
            },
            Spec: corev1.PodSpec{
                Containers: []corev1.Container{
                    {
                        Name:            "nginx",
                        Image:           "nginx",
                        ImagePullPolicy: corev1.PullNever, // use image only if already present
                    },
                },
            },
        }

        _, err = k8s.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{})
        if err != nil {
            t.Fatal(err)
        }

        // Poll until the container reaches Running, failing fast if it
        // terminates (e.g. because the image was not available).
        err = kwait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (bool, error) {
            state, err := getTestPodState(ctx, k8s)
            if err != nil {
                return false, err
            }
            if state.Terminated != nil {
                return false, fmt.Errorf("pod terminated: %v", state.Terminated)
            }
            return state.Running != nil, nil
        })
        if err != nil {
            t.Fatal(err)
        }

        state, err := getTestPodState(ctx, k8s)
        if err != nil {
            t.Fatal(err)
        }
        if state.Running == nil {
            t.Fatalf("Unexpected status %v", state)
        }
    })
}

// getTestPodState fetches the "test-pod" pod from the default namespace and
// returns the state of its first container. A zero ContainerState is returned
// when the lookup fails or no container statuses are reported yet.
func getTestPodState(ctx context.Context, k8s *kubernetes.Clientset) (corev1.ContainerState, error) {
    pod, err := k8s.CoreV1().Pods("default").Get(ctx, "test-pod", metav1.GetOptions{})
    if err != nil || len(pod.Status.ContainerStatuses) == 0 {
        return corev1.ContainerState{}, err
    }
    return pod.Status.ContainerStatuses[0].State, nil
}

// Test_APIServerReady checks that the k3s API server accepts requests as soon
// as the container reports ready, by creating a pod right after startup.
func Test_APIServerReady(t *testing.T) {
    ctx := context.Background()

    k3sContainer, err := k3s.RunContainer(ctx,
        testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1"),
    )
    if err != nil {
        t.Fatal(err)
    }

    // Clean up the container
    defer func() {
        if err := k3sContainer.Terminate(ctx); err != nil {
            t.Fatal(err)
        }
    }()

    kubeConfigYaml, err := k3sContainer.GetKubeConfig(ctx)
    if err != nil {
        t.Fatal(err)
    }

    restcfg, err := clientcmd.RESTConfigFromKubeConfig(kubeConfigYaml)
    if err != nil {
        t.Fatal(err)
    }

    k8s, err := kubernetes.NewForConfig(restcfg)
    if err != nil {
        t.Fatal(err)
    }

    pod := &corev1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: "test-pod",
        },
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{
                {
                    Name:  "nginx",
                    Image: "nginx",
                },
            },
        },
    }

    // Reuse the test's ctx instead of a fresh context.Background(), so the
    // request is tied to the same lifetime as the rest of the test.
    _, err = k8s.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{})
    if err != nil {
        t.Fatalf("failed to create pod %v", err)
    }
}

// Test_WithManifestOption starts k3s with a local manifest applied at boot
// and waits until the pod it declares becomes Ready.
func Test_WithManifestOption(t *testing.T) {
    ctx := context.Background()

    container, err := k3s.RunContainer(ctx,
        testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1"),
        k3s.WithManifest("nginx-manifest.yaml"),
        testcontainers.WithWaitStrategy(wait.ForExec([]string{"kubectl", "wait", "pod", "nginx", "--for=condition=Ready"})),
    )
    if err != nil {
        t.Fatal(err)
    }

    // Always terminate the container when the test finishes.
    defer func() {
        if terr := container.Terminate(ctx); terr != nil {
            t.Fatal(terr)
        }
    }()
}

Module reference

The K3s module exposes one entrypoint function to create the K3s container, and this function receives two parameters:

func RunContainer(ctx context.Context, opts ...testcontainers.ContainerCustomizer) (*K3sContainer, error)
  • context.Context, the Go context.
  • testcontainers.ContainerCustomizer, a variadic argument for passing options.

Container Ports

These are the ports used by the K3s container:

defaultKubeSecurePort     = "6443/tcp"
defaultRancherWebhookPort = "8443/tcp"

Container Options

When starting the K3s container, you can pass options in a variadic way to configure it.

Image

If you need to set a different K3s Docker image, you can use testcontainers.WithImage with a valid Docker image for K3s. E.g. testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1").

Image Substitutions

In more locked down / secured environments, it can be problematic to pull images from Docker Hub and run them without additional precautions.

An image name substitutor converts a Docker image name, as may be specified in code, to an alternative name. This is intended to provide a way to override image names, for example to enforce pulling of images from a private registry.

Testcontainers for Go exposes an interface to perform these operations: ImageSubstitutor, and a no-operation implementation to be used as a reference for custom implementations:

// ImageSubstitutor represents a way to substitute container image names
type ImageSubstitutor interface {
	// Description returns the name of the type and a short description of how it modifies the image.
	// Useful to be printed in logs
	Description() string
	Substitute(image string) (string, error)
}

// NoopImageSubstitutor is a reference implementation that performs no
// substitution at all; use it as a starting point for custom substitutors.
type NoopImageSubstitutor struct{}

// Description identifies this substitutor in log output.
func (n NoopImageSubstitutor) Description() string {
	return "NoopImageSubstitutor (noop)"
}

// Substitute is a no-op: the image name is returned untouched.
func (n NoopImageSubstitutor) Substitute(image string) (string, error) {
	return image, nil
}

Using the WithImageSubstitutors options, you could define your own substitutions to the container images. E.g. adding a prefix to the images so that they can be pulled from a Docker registry other than Docker Hub. This is the usual mechanism for using Docker image proxies, caches, etc.

WithEnv

If you need to either pass additional environment variables to a container or override them, you can use testcontainers.WithEnv for example:

postgres, err = postgresModule.RunContainer(ctx, testcontainers.WithEnv(map[string]string{"POSTGRES_INITDB_ARGS": "--no-sync"}))

WithLogConsumers

If you need to consume the logs of the container, you can use testcontainers.WithLogConsumers with a valid log consumer. An example of a log consumer is the following:

// TestLogConsumer collects container log lines into an in-memory slice.
type TestLogConsumer struct {
	Msgs []string
}

// Accept appends the content of each received log entry to Msgs.
func (c *TestLogConsumer) Accept(l Log) {
	c.Msgs = append(c.Msgs, string(l.Content))
}

WithLogger

If you need to pass a logger to the container, you can use testcontainers.WithLogger.

Info

Consider calling this before other "With" functions as these may generate logs.

In this example we also use TestLogger which writes to the passed-in testing.TB using Logf. The result is that we capture all logging from the container into the test context, meaning it's hidden behind go test -v and is associated with the relevant test, providing the user with useful context instead of appearing out of band.

func TestHandler(t *testing.T) {
    logger := TestLogger(t)
    _, err := postgresModule.RunContainer(ctx, testcontainers.WithLogger(logger))
    require.NoError(t, err)
    // Do something with container.
}

Please read the Following Container Logs documentation for more information about creating log consumers.

Wait Strategies

If you need to set a different wait strategy for the container, you can use testcontainers.WithWaitStrategy with a valid wait strategy.

Info

The default deadline for the wait strategy is 60 seconds.

At the same time, it's possible to set a wait strategy and a custom deadline with testcontainers.WithWaitStrategyAndDeadline.

Startup Commands

Testcontainers exposes the WithStartupCommand(e ...Executable) option to run arbitrary commands in the container right after it's started.

Info

To better understand how this feature works, please read the Create containers: Lifecycle Hooks documentation.

It also exports an Executable interface, defining the following methods:

  • AsCommand(), which returns a slice of strings to represent the command and positional arguments to be executed in the container;
  • Options(), which returns the slice of functional options with the Docker's ExecConfigs used to create the command in the container (the working directory, environment variables, user executing the command, etc) and the possible output format (Multiplexed).

You could use this feature to run a custom script, or to run a command that is not supported by the module right after the container is started.

Ready Commands

Testcontainers exposes the WithAfterReadyCommand(e ...Executable) option to run arbitrary commands in the container right after it's ready, which happens when the defined wait strategies have finished with success.

Info

To better understand how this feature works, please read the Create containers: Lifecycle Hooks documentation.

It leverages the Executable interface to represent the command and positional arguments to be executed in the container.

You could use this feature to run a custom script, or to run a command that is not supported by the module right after the container is ready.

WithNetwork

By default, the container is started in the default Docker network. If you want to use an already existing Docker network you created in your code, you can use the network.WithNetwork(aliases []string, nw *testcontainers.DockerNetwork) option, which receives an alias as parameter and your network, attaching the container to it, and setting the network alias for that network.

In the case you need to retrieve the network name, you can simply read it from the struct's Name field. E.g. nw.Name.

Warning

This option is not checking whether the network exists or not. If you use a network that doesn't exist, the container will start in the default Docker network, as in the default behavior.

WithNewNetwork

If you want to attach your containers to a throw-away network, you can use the network.WithNewNetwork(ctx context.Context, aliases []string, opts ...network.NetworkCustomizer) option, which receives an alias as parameter, creating the new network with a random name, attaching the container to it, and setting the network alias for that network.

In the case you need to retrieve the network name, you can use the Networks(ctx) method of the Container interface, right after it's running, which returns a slice of strings with the names of the networks where the container is attached.

Docker type modifiers

If you need an advanced configuration for the container, you can leverage the following Docker type modifiers:

  • testcontainers.WithConfigModifier
  • testcontainers.WithHostConfigModifier
  • testcontainers.WithEndpointSettingsModifier

Please read the Create containers: Advanced Settings documentation for more information.

Customising the ContainerRequest

This option will merge the customized request into the module's own ContainerRequest.

container, err := RunContainer(ctx,
    /* Other module options */
    testcontainers.CustomizeRequest(testcontainers.GenericContainerRequest{
        ContainerRequest: testcontainers.ContainerRequest{
            Cmd: []string{"-c", "log_statement=all"},
        },
    }),
)

The above example is updating the predefined command of the image, appending them to the module's command.

Info

This can't be used to replace the command, only to append options.

WithManifest

The WithManifest option loads a manifest obtained from a local file into the cluster. K3s applies it automatically during the startup process.

func WithManifest(manifestPath string) testcontainers.CustomizeRequestOption

Example:

        WithManifest("nginx-manifest.yaml")

Container Methods

The K3s container exposes the following methods:

GetKubeConfig

The GetKubeConfig method returns the K3s cluster's kubeconfig, including the server URL, to be used for connecting to the Kubernetes Rest Client API using a Kubernetes client. It'll be returned in the format of []bytes.

package k3s_test

import (
    "context"
    "fmt"
    "log"

    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/modules/k3s"
)

// ExampleRunContainer starts a k3s container, builds a Kubernetes client from
// its kubeconfig, and lists the cluster's nodes.
func ExampleRunContainer() {
	// runK3sContainer {
	ctx := context.Background()

	k3sContainer, err := k3s.RunContainer(ctx,
		testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1"),
	)
	if err != nil {
		log.Fatalf("failed to start container: %s", err)
	}

	// Clean up the container
	defer func() {
		if err := k3sContainer.Terminate(ctx); err != nil {
			log.Fatalf("failed to terminate container: %s", err)
		}
	}()
	// }

	state, err := k3sContainer.State(ctx)
	if err != nil {
		log.Fatalf("failed to get container state: %s", err) // nolint:gocritic
	}

	fmt.Println(state.Running)

	kubecfg, err := k3sContainer.GetKubeConfig(ctx)
	if err != nil {
		log.Fatalf("failed to get kubeconfig: %s", err)
	}

	restCfg, err := clientcmd.RESTConfigFromKubeConfig(kubecfg)
	if err != nil {
		log.Fatalf("failed to create rest config: %s", err)
	}

	client, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		log.Fatalf("failed to create k8s client: %s", err)
	}

	nodes, err := client.CoreV1().Nodes().List(ctx, v1.ListOptions{})
	if err != nil {
		log.Fatalf("failed to list nodes: %s", err)
	}

	fmt.Println(len(nodes.Items))

	// Output:
	// true
	// 1
}

LoadImages

The LoadImages method loads a list of images into the kubernetes cluster and makes them available to pods.

This is useful for testing images generated locally without having to push them to a public docker registry or having to configure k3s to use a private registry.

The images must already be present on the node running the test. DockerProvider offers a method for pulling images, which can be used from the test code to ensure the image is present locally before loading it into the cluster.