commit a267b572a36a5097640c96e38ac4b57673719927
Author: Sergio Talens-Oliag
Date:   Mon Jul 17 23:34:46 2023 +0200

    Initial commit

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f236e41
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+yaml/
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2c6ff87
--- /dev/null
+++ b/README.md
@@ -0,0 +1,7 @@
+# Testing cilium with k3d and kind
+
+This repository contains scripts and templates to test cilium on a Linux server
+using `k3d` or `kind` with `docker`.
+
+The documentation about how to use them is available on this [blog
+post](https://blogops.mixinet.net/posts/testing_cilium_with_k3d_and_kind/).
+WORK_DIR="$(readlink -f "$SCRIPT_DIR/$WORK_DIR_RELPATH")" +TMPL_DIR="$WORK_DIR/tmpl" +YAML_DIR="$WORK_DIR/yaml" + +# --------- +# VARIABLES +# --------- + +GATEWAY_API_ENABLED="${GATEWAY_API_ENABLED:-false}" +INGRESS_CONTROLLER_DEFAULT="${INGRESS_CONTROLLER_DEFAULT:-false}" +INGRESS_CONTROLLER_ENABLED="${INGRESS_CONTROLLER_ENABLED:-false}" +LOADBALANCER_MODE="shared" + +TUNNEL="vxlan" + +K3D_NETWORK_NAME="cilium" +K3D_NET_PREFIX="172.30" +K3D_CLUSTER_SUBNET_PREFIX="10.1" +K3D_SERVICE_SUBNET_PREFIX="10.10" +KIND_NETWORK_NAME="kind" +KIND_NET_PREFIX="172.31" +KIND_CLUSTER_SUBNET_PREFIX="10.2" +KIND_SERVICE_SUBNET_PREFIX="10.20" + +NETWORK_TYPE="bridge" + +METALLB_ENABLED="true" +METALLB_BASE_URL="https://raw.githubusercontent.com/metallb/metallb" +METALLB_VERSION="v0.13.9" +METALLB_DEPLOY_YAML="config/manifests/metallb-native.yaml" +METALLB_YAML_URL="$METALLB_BASE_URL/$METALLB_VERSION/$METALLB_DEPLOY_YAML" +METALLB_YAML="$YAML_DIR/metallb-native.yaml" + +NGINX_IC_ENABLED="true" +NGINX_IC_BASE_URL="https://raw.githubusercontent.com/kubernetes/ingress-nginx" +NGINX_IC_VERSION="controller-v1.7.0" +NGINX_IC_DEPLOY_YAML="deploy/static/provider/cloud/deploy.yaml" +NGINX_IC_YAML_URL="$NGINX_IC_BASE_URL/$NGINX_IC_VERSION/$NGINX_IC_DEPLOY_YAML" +NGINX_IC_YAML="$YAML_DIR/ingress-nginx-deploy.yaml" + +# GOTMPLs +TMPL_K3D_CONFIG_YAML="$TMPL_DIR/k3d-config.yaml" +TMPL_KIND_CONFIG_YAML="$TMPL_DIR/kind-config.yaml" +TMPL_IPPOOLS_YAML="$TMPL_DIR/ippools.yaml" +TMPL_CILIUM_YAML="$TMPL_DIR/cilium.yaml" +TMPL_METALLB_CRDS_YAML="$TMPL_DIR/metallb-crds.yaml" + +# Adjust variables based on other variables +if [ "$METALLB_ENABLED" = "true" ]; then + BGP_CONTROL_PLANE_ENABLED="false" +else + BGP_CONTROL_PLANE_ENABLED="true" +fi + +# --------- +# FUNCTIONS +# --------- + +create_network() { + NETWORK_ID="$( + docker network inspect "$NETWORK_NAME" --format "{{.Id}}" 2>/dev/null + )" || true + if [ "$NETWORK_ID" ]; then + echo "Using existing network '$NETWORK_NAME' with id '$NETWORK_ID'" + else + echo "Creating network '$NETWORK_NAME' in docker" + docker network create \ + --driver "$NETWORK_TYPE" \ + --subnet "$NETWORK_SUBNET" \ + --gateway "$NETWORK_GATEWAY" \ + --ip-range "$NETWORK_IP_RANGE" \ + "$NETWORK_NAME" + fi +} + +create_cluster() { + echo "Creating $CTOOL cluster '$CNAME'" + case "$CTOOL" in + k3d) + tmpl \ + -v "cnum=$CNUM" \ + -v "cname=$CNAME" \ + -v "host_ip=$HOST_IP" \ + -v "cluster_subnet=$CLUSTER_SUBNET" \ + -v "service_subnet=$SERVICE_SUBNET" \ + -v "work_dir=$WORK_DIR" \ + "$TMPL_K3D_CONFIG_YAML" | + k3d cluster create -c - + ;; + kind) + tmpl \ + -v "cnum=$CNUM" \ + -v "cname=$CNAME" \ + -v "host_ip=$HOST_IP" \ + -v "cluster_subnet=$CLUSTER_SUBNET" \ + -v "service_subnet=$SERVICE_SUBNET" \ + -v "work_dir=$WORK_DIR" \ + "$TMPL_KIND_CONFIG_YAML" | + kind create cluster --config="-" + ;; + esac + echo "Cluster '$CNAME' info" + kubectl --context "$CTX" cluster-info +} + +install_gateway_api_crds() { + BASE_URL="https://raw.githubusercontent.com/kubernetes-sigs/gateway-api" + BASE_URL="$BASE_URL/v0.5.1/config/crd" + echo "Installing GatewayAPI CRDs" + for crd_yaml in standard/gateway.networking.k8s.io_gatewayclasses.yaml \ + standard/gateway.networking.k8s.io_gateways.yaml \ + standard/gateway.networking.k8s.io_httproutes.yaml \ + experimental/gateway.networking.k8s.io_referencegrants.yaml; do + kubectl --context "$CTX" apply -f "$BASE_URL/$crd_yaml" + done +} + +cilium_status() { + echo "Checking cilium status" + cilium status --wait --context "$CTX" +} + +master_node_ip() { + # If we are not 
diff --git a/sbin/cilium-install.sh b/sbin/cilium-install.sh
new file mode 100755
index 0000000..77340f9
--- /dev/null
+++ b/sbin/cilium-install.sh
@@ -0,0 +1,362 @@
+#!/bin/sh
+# ----
+# File: cilium-install.sh
+# Description: Tool to install k8s cilium test clusters using k3d or kind
+# Author: Sergio Talens-Oliag
+# Copyright: (c) 2023 Sergio Talens-Oliag
+# ----
+
+set -e
+
+# Compute WORK_DIR
+SCRIPT="$(readlink -f "$0")"
+SCRIPT_DIR="$(dirname "$SCRIPT")"
+WORK_DIR_RELPATH=".."
+WORK_DIR="$(readlink -f "$SCRIPT_DIR/$WORK_DIR_RELPATH")"
+TMPL_DIR="$WORK_DIR/tmpl"
+YAML_DIR="$WORK_DIR/yaml"
+
+# ---------
+# VARIABLES
+# ---------
+
+GATEWAY_API_ENABLED="${GATEWAY_API_ENABLED:-false}"
+INGRESS_CONTROLLER_DEFAULT="${INGRESS_CONTROLLER_DEFAULT:-false}"
+INGRESS_CONTROLLER_ENABLED="${INGRESS_CONTROLLER_ENABLED:-false}"
+LOADBALANCER_MODE="shared"
+
+TUNNEL="vxlan"
+
+K3D_NETWORK_NAME="cilium"
+K3D_NET_PREFIX="172.30"
+K3D_CLUSTER_SUBNET_PREFIX="10.1"
+K3D_SERVICE_SUBNET_PREFIX="10.10"
+KIND_NETWORK_NAME="kind"
+KIND_NET_PREFIX="172.31"
+KIND_CLUSTER_SUBNET_PREFIX="10.2"
+KIND_SERVICE_SUBNET_PREFIX="10.20"
+
+NETWORK_TYPE="bridge"
+
+METALLB_ENABLED="true"
+METALLB_BASE_URL="https://raw.githubusercontent.com/metallb/metallb"
+METALLB_VERSION="v0.13.9"
+METALLB_DEPLOY_YAML="config/manifests/metallb-native.yaml"
+METALLB_YAML_URL="$METALLB_BASE_URL/$METALLB_VERSION/$METALLB_DEPLOY_YAML"
+METALLB_YAML="$YAML_DIR/metallb-native.yaml"
+
+NGINX_IC_ENABLED="true"
+NGINX_IC_BASE_URL="https://raw.githubusercontent.com/kubernetes/ingress-nginx"
+NGINX_IC_VERSION="controller-v1.7.0"
+NGINX_IC_DEPLOY_YAML="deploy/static/provider/cloud/deploy.yaml"
+NGINX_IC_YAML_URL="$NGINX_IC_BASE_URL/$NGINX_IC_VERSION/$NGINX_IC_DEPLOY_YAML"
+NGINX_IC_YAML="$YAML_DIR/ingress-nginx-deploy.yaml"
+
+# GOTMPLs
+TMPL_K3D_CONFIG_YAML="$TMPL_DIR/k3d-config.yaml"
+TMPL_KIND_CONFIG_YAML="$TMPL_DIR/kind-config.yaml"
+TMPL_IPPOOLS_YAML="$TMPL_DIR/ippools.yaml"
+TMPL_CILIUM_YAML="$TMPL_DIR/cilium.yaml"
+TMPL_METALLB_CRDS_YAML="$TMPL_DIR/metallb-crds.yaml"
+
+# Adjust variables based on other variables
+if [ "$METALLB_ENABLED" = "true" ]; then
+  BGP_CONTROL_PLANE_ENABLED="false"
+else
+  BGP_CONTROL_PLANE_ENABLED="true"
+fi
+
+# ---------
+# FUNCTIONS
+# ---------
+
+create_network() {
+  NETWORK_ID="$(
+    docker network inspect "$NETWORK_NAME" --format "{{.Id}}" 2>/dev/null
+  )" || true
+  if [ "$NETWORK_ID" ]; then
+    echo "Using existing network '$NETWORK_NAME' with id '$NETWORK_ID'"
+  else
+    echo "Creating network '$NETWORK_NAME' in docker"
+    docker network create \
+      --driver "$NETWORK_TYPE" \
+      --subnet "$NETWORK_SUBNET" \
+      --gateway "$NETWORK_GATEWAY" \
+      --ip-range "$NETWORK_IP_RANGE" \
+      "$NETWORK_NAME"
+  fi
+}
+
+create_cluster() {
+  echo "Creating $CTOOL cluster '$CNAME'"
+  case "$CTOOL" in
+  k3d)
+    tmpl \
+      -v "cnum=$CNUM" \
+      -v "cname=$CNAME" \
+      -v "host_ip=$HOST_IP" \
+      -v "cluster_subnet=$CLUSTER_SUBNET" \
+      -v "service_subnet=$SERVICE_SUBNET" \
+      -v "work_dir=$WORK_DIR" \
+      "$TMPL_K3D_CONFIG_YAML" |
+      k3d cluster create -c -
+    ;;
+  kind)
+    tmpl \
+      -v "cnum=$CNUM" \
+      -v "cname=$CNAME" \
+      -v "host_ip=$HOST_IP" \
+      -v "cluster_subnet=$CLUSTER_SUBNET" \
+      -v "service_subnet=$SERVICE_SUBNET" \
+      -v "work_dir=$WORK_DIR" \
+      "$TMPL_KIND_CONFIG_YAML" |
+      kind create cluster --config="-"
+    ;;
+  esac
+  echo "Cluster '$CNAME' info"
+  kubectl --context "$CTX" cluster-info
}
+
+install_gateway_api_crds() {
+  BASE_URL="https://raw.githubusercontent.com/kubernetes-sigs/gateway-api"
+  BASE_URL="$BASE_URL/v0.5.1/config/crd"
+  echo "Installing GatewayAPI CRDs"
+  for crd_yaml in standard/gateway.networking.k8s.io_gatewayclasses.yaml \
+    standard/gateway.networking.k8s.io_gateways.yaml \
+    standard/gateway.networking.k8s.io_httproutes.yaml \
+    experimental/gateway.networking.k8s.io_referencegrants.yaml; do
+    kubectl --context "$CTX" apply -f "$BASE_URL/$crd_yaml"
+  done
+}
+
+cilium_status() {
+  echo "Checking cilium status"
+  cilium status --wait --context "$CTX"
+}
+
+master_node_ip() {
+  # If we are not running kube-proxy the cilium Pods can't reach the api
+  # server because the in-cluster service can't be reached; to fix the issue
+  # we use an internal IP that the pods can reach, in this case the internal
+  # IP of the master node container.
+  case "$CTOOL" in
+  k3d) MASTER_NODE="node/$CTX-server-0";;
+  kind) MASTER_NODE="node/$CNAME-control-plane";;
+  *) echo "Unknown master node"; exit 1;;
+  esac
+  kubectl --context "$CTX" get "$MASTER_NODE" -o wide --no-headers |
+    awk '{ print $6 }'
+}
+
+cilium_cli_install() {
+  if [ "$GATEWAY_API_ENABLED" = "true" ]; then
+    install_gateway_api_crds
+  fi
+  _xtra_args=""
+  if [ "$CNUM" = "2" ]; then
+    _xtra_args="--inherit-ca $CTOOL-cilium1"
+  fi
+  MASTER_NODE_IP="$(master_node_ip)"
+  # shellcheck disable=SC2086
+  tmpl \
+    -v "master_node_ip=$MASTER_NODE_IP" \
+    -v "cnum=$CNUM" \
+    -v "cname=$CNAME" \
+    -v "bgp_control_plane_enabled=$BGP_CONTROL_PLANE_ENABLED" \
+    -v "gateway_api_enabled=$GATEWAY_API_ENABLED" \
+    -v "ingress_controller_default=$INGRESS_CONTROLLER_DEFAULT" \
+    -v "ingress_controller_enabled=$INGRESS_CONTROLLER_ENABLED" \
+    -v "loadbalancer_mode=$LOADBALANCER_MODE" \
+    -v "tunnel=$TUNNEL" \
+    "$TMPL_CILIUM_YAML" |
+    cilium install --context "$CTX" --helm-values - $_xtra_args
+  # Wait for the deployment
+  cilium_status
+  echo "Enabling hubble"
+  cilium hubble enable --ui --context "$CTX"
+}
+
+cilium_helm_install() {
+  if [ "$GATEWAY_API_ENABLED" = "true" ]; then
+    install_gateway_api_crds
+  fi
+  helm repo add cilium https://helm.cilium.io/ >/dev/null || true
+  # Copy the cilium-ca to the second cluster
+  if [ "$CNUM" = "2" ]; then
+    echo "Copying the 'cilium-ca' from '$CTOOL-cilium1' to '$CTX'"
+    kubectl --context "$CTOOL-cilium1" -n kube-system get secrets/cilium-ca \
+      -o yaml | kubectl apply --context "$CTX" -f -
+  fi
+  MASTER_NODE_IP="$(master_node_ip)"
+  # shellcheck disable=SC2086
+  tmpl \
+    -v "master_node_ip=$MASTER_NODE_IP" \
+    -v "cnum=$CNUM" \
+    -v "cname=$CNAME" \
+    -v "bgp_control_plane_enabled=$BGP_CONTROL_PLANE_ENABLED" \
+    -v "gateway_api_enabled=$GATEWAY_API_ENABLED" \
+    -v "ingress_controller_default=$INGRESS_CONTROLLER_DEFAULT" \
+    -v "ingress_controller_enabled=$INGRESS_CONTROLLER_ENABLED" \
+    -v "loadbalancer_mode=$LOADBALANCER_MODE" \
+    -v "tunnel=$TUNNEL" \
+    "$TMPL_CILIUM_YAML" |
+    helm upgrade --install cilium cilium/cilium --version 1.13.1 \
+      --kube-context "$CTX" --namespace=kube-system --values=-
+}
+
+cilium_install() {
+  echo "Installing cilium in cluster '$CNAME'"
+  cilium_helm_install
+  cilium_status
+}
+
+lb_download_yaml() {
+  [ -d "$YAML_DIR" ] || mkdir "$YAML_DIR"
+  curl -fsSL -o "$METALLB_YAML" "$METALLB_YAML_URL"
+}
+
+lb_install() {
+  if [ "$METALLB_ENABLED" = "true" ]; then
+    if [ ! -f "$METALLB_YAML" ]; then
+      lb_download_yaml
+    fi
+    echo "Installing metallb on kind cluster '$CNAME'"
+    kubectl --context "$CTX" apply -f "$METALLB_YAML"
+    echo "Waiting for metallb to be ready"
+    kubectl --context "$CTX" rollout status deployment --timeout="120s" \
+      -n "metallb-system" "controller"
+    echo "Configuring metallb"
+    tmpl -v "lb_pool_range=$LB_POOL_RANGE" "$TMPL_METALLB_CRDS_YAML" |
+      kubectl --context "$CTX" apply -f -
+  elif [ "$BGP_CONTROL_PLANE_ENABLED" = "true" ]; then
+    echo "Adding LB IPAM Pools"
+    tmpl -v "lb_pool_cdir=$LB_POOL_CDIR" "$TMPL_IPPOOLS_YAML" |
+      kubectl --context "$CTX" apply -f -
+  fi
+}
+
+ingress_download_yaml() {
+  [ -d "$YAML_DIR" ] || mkdir "$YAML_DIR"
+  curl -fsSL -o "$NGINX_IC_YAML" "$NGINX_IC_YAML_URL"
+}
+
+ingress_install() {
+  if [ "$NGINX_IC_ENABLED" = "true" ]; then
+    if [ ! -f "$NGINX_IC_YAML" ]; then
+      ingress_download_yaml
+    fi
+    echo "Installing nginx ingress controller on kind cluster '$CNAME'"
+    kubectl --context "$CTX" apply -f "$NGINX_IC_YAML"
+    echo "Waiting for the nginx controller to be ready"
+    kubectl --context "$CTX" wait --namespace ingress-nginx \
+      --for=condition=ready pod \
+      --selector=app.kubernetes.io/component=controller \
+      --timeout=120s
+  fi
+}
+
+mesh_install() {
+  echo "Enabling cluster-mesh on cluster '$CNAME'"
+  cilium clustermesh enable --context "$CTX" --service-type LoadBalancer
+  echo "Checking cilium status on cluster '$CNAME'"
+  cilium status --context "$CTX" --wait
+  if [ "$CNUM" -eq "2" ]; then
+    echo "Connecting cluster"
+    cilium clustermesh connect --context "$CTOOL-cilium1" \
+      --destination-context "$CTOOL-cilium2"
+    echo "Checking cilium status on cluster '$CNAME'"
+    cilium status --context "$CTX" --wait
+  fi
+}
+
+usage() {
+  cat <
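The usage heredoc and the MAIN section of this script were lost in this
listing, so the exact argument handling is not visible here; judging from the
CTOOL, CNUM, CNAME and CTX variables used by the functions above, a plausible
invocation (hypothetical, check the script's usage output) would be:

  # Create the first k3d test cluster and install cilium on it
  sbin/cilium-install.sh k3d 1
  # Create the second one; CNUM=2 triggers the CA copy and the mesh connect
  sbin/cilium-install.sh k3d 2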
-f "$METALLB_YAML" ]; then + lb_download_yaml + fi + echo "Installing metallb on kind cluster '$CNAME'" + kubectl --context "$CTX" apply -f "$METALLB_YAML" + echo "Waiting for metallb to be ready" + kubectl --context "$CTX" rollout status deployment --timeout="120s" \ + -n "metallb-system" "controller" + echo "Configuring metallb" + tmpl -v "lb_pool_range=$LB_POOL_RANGE" "$TMPL_METALLB_CRDS_YAML" | + kubectl --context "$CTX" apply -f - + elif [ "$BGP_CONTROL_PLANE_ENABLED" = "true" ]; then + echo "Adding LB IPAM Pools" + tmpl -v "lb_pool_cdir=$LB_POOL_CDIR" "$TMPL_IPPOOLS_YAML" | + kubectl --context "$CTX" apply -f - + fi +} + +ingress_download_yaml() { + [ -d "$YAML_DIR" ] || mkdir "$YAML_DIR" + curl -fsSL -o "$NGINX_IC_YAML" "$NGINX_IC_YAML_URL" +} + +ingress_install() { + if [ "$NGINX_IC_ENABLED" = "true" ]; then + if [ ! -f "$NGINX_IC_YAML" ]; then + ingress_download_yaml + fi + echo "Installing nginx ingress controller on kind cluster '$CNAME'" + kubectl --context "$CTX" apply -f "$NGINX_IC_YAML" + echo "Waiting for the nginx controller to be ready" + kubectl --context "$CTX" wait --namespace ingress-nginx \ + --for=condition=ready pod \ + --selector=app.kubernetes.io/component=controller \ + --timeout=120s + fi +} + +mesh_install() { + echo "Enabling cluster-mesh on cluster '$CNAME'" + cilium clustermesh enable --context "$CTX" --service-type LoadBalancer + echo "Checking cilium status on cluster '$CNAME'" + cilium status --context "$CTX" --wait + if [ "$CNUM" -eq "2" ]; then + echo "Connecting cluster" + cilium clustermesh connect --context "$CTOOL-cilium1" \ + --destination-context "$CTOOL-cilium2" + echo "Checking cilium status on cluster '$CNAME'" + cilium status --context "$CTX" --wait + fi +} + +usage() { + cat < +# Copyright: (c) 2023 Sergio Talens-Oliag +# ---- + +set -e + +# --------- +# VARIABLES +# --------- +K3D_NETWORK_NAME="cilium" +KIND_NETWORK_NAME="kind" + +# --------- +# FUNCTIONS +# --------- + +delete_network() { + NETWORK_ID="$( + docker network inspect "$NETWORK_NAME" --format "{{.Id}}" 2>/dev/null + )" || true + if [ "$NETWORK_ID" ]; then + echo "Removing network '$NETWORK_NAME' with id '$NETWORK_ID'" + docker network rm "$NETWORK_NAME" + else + echo "Network '$NETWORK_NAME' not found in docker" + fi +} + +delete_cluster() { + case "$CTOOL" in + k3d) + echo "Deleting k3d cluster '$CNAME'" + k3d cluster delete "$CNAME" + ;; + kind) + echo "Deleting kind cluster '$CNAME'" + kind delete cluster -n "$CNAME" + ;; + esac +} + +usage() { + cat < +# Copyright: (c) 2023 Sergio Talens-Oliag +# ---- + +set -e + +# --------- +# Variables +# --------- + +# System dirs +BASH_COMPLETION="/etc/bash_completion.d" +ZSH_COMPLETIONS="/usr/share/zsh/vendor-completions" + +# Terminal related variables +if [ "$TERM" ] && type tput >/dev/null; then + bold="$(tput bold)" + normal="$(tput sgr0)" +else + bold="" + normal="" +fi +export yes_no="(${bold}Y${normal}es/${bold}N${normal}o)" + +# Versions +CILIUM_CLI_VERSION="${CILIUM_CLI_VERSION:-v0.13.2}" +# Uncomment to get the latest helm version +# GET_HELM_URL="https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3" +HELM_VERSION="${HELM_VERSION:-3.11.2}" +HUBBLE_VERSION="${HUBBLE_VERSION:-v0.11.3}" +K3D_VERSION="${K3D_VERSION:-v5.4.9}" +KIND_VERSION="${KIND_VERSION:-v0.18.0}" +KUBECTL_VERSION="${KUBECTL_VERSION:-1.26.3}" +TMPL_VERSION="${TMPL_VERSION:-v0.4.0}" + +# --------- +# Functions +# --------- + +# Auxiliary function to read a boolean value. 
+# Copyright: (c) 2023 Sergio Talens-Oliag
+# ----
+
+set -e
+
+# ---------
+# Variables
+# ---------
+
+# System dirs
+BASH_COMPLETION="/etc/bash_completion.d"
+ZSH_COMPLETIONS="/usr/share/zsh/vendor-completions"
+
+# Terminal related variables
+if [ "$TERM" ] && type tput >/dev/null 2>&1; then
+  bold="$(tput bold)"
+  normal="$(tput sgr0)"
+else
+  bold=""
+  normal=""
+fi
+export yes_no="(${bold}Y${normal}es/${bold}N${normal}o)"
+
+# Versions
+CILIUM_CLI_VERSION="${CILIUM_CLI_VERSION:-v0.13.2}"
+# Uncomment to get the latest helm version
+# GET_HELM_URL="https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3"
+HELM_VERSION="${HELM_VERSION:-3.11.2}"
+HUBBLE_VERSION="${HUBBLE_VERSION:-v0.11.3}"
+K3D_VERSION="${K3D_VERSION:-v5.4.9}"
+KIND_VERSION="${KIND_VERSION:-v0.18.0}"
+KUBECTL_VERSION="${KUBECTL_VERSION:-1.26.3}"
+TMPL_VERSION="${TMPL_VERSION:-v0.4.0}"
+
+# ---------
+# Functions
+# ---------
+
+# Auxiliary function to read a boolean value. $1 text to show - $2 default value
+read_bool() {
+  case "${2}" in
+  y | Y | yes | Yes | YES | true | True | TRUE) _yn="Yes" ;;
+  *) _yn="No" ;;
+  esac
+  printf "%s ${yes_no} [%s]: " "${1}" "${bold}${_yn}${normal}"
+  read -r READ_VALUE
+  case "${READ_VALUE}" in
+  '') [ "$_yn" = "Yes" ] && READ_VALUE="true" || READ_VALUE="false" ;;
+  y | Y | yes | Yes | YES | true | True | TRUE) READ_VALUE="true" ;;
+  *) READ_VALUE="false" ;;
+  esac
+}
+
+# Auxiliary function to check if a boolean value is set to yes/true or not
+is_selected() {
+  case "${1}" in
+  y | Y | yes | Yes | YES | true | True | TRUE) return 0 ;;
+  *) return 1 ;;
+  esac
+}
+
+# Auxiliary function to check if an application is installed
+tools_app_installed() {
+  _app="$1"
+  type "$_app" >/dev/null 2>&1 || return 1
+}
+
+# Function to check if all the tools are installed
+tools_check_apps_installed() {
+  _missing=""
+  for _app in "$@"; do
+    tools_app_installed "$_app" || _missing="$_missing $_app"
+  done
+  if [ "$_missing" ]; then
+    echo "The following apps could not be found:"
+    for _app in $_missing; do
+      echo "- $_app"
+    done
+    exit 1
+  fi
+}
+
+# Auxiliary function to check if we want to install an app
+tools_install_app() {
+  _app="$1"
+  if tools_app_installed "$_app"; then
+    echo "$_app found ($(type "$_app"))."
+    MSG="Re-install in /usr/local/bin?"
+    OPT="false"
+  else
+    echo "$_app could not be found."
+    MSG="Install it in /usr/local/bin?"
+    OPT="true"
+  fi
+  # Export NONINTERACTIVE as 'true' to use default values
+  if [ "$NONINTERACTIVE" = "true" ]; then
+    READ_VALUE="$OPT"
+  else
+    read_bool "$MSG" "$OPT"
+  fi
+  is_selected "${READ_VALUE}" && return 0 || return 1
+}
+
+tools_check_cilium() {
+  if tools_install_app "cilium"; then
+    tmp_dir="$(mktemp -d)"
+    os="$(uname -s | tr '[:upper:]' '[:lower:]')"
+    case "$(uname -m)" in
+    x86_64) arch="amd64" ;;
+    esac
+    url="https://github.com/cilium/cilium-cli/releases/download"
+    url="$url/${CILIUM_CLI_VERSION}/cilium-linux-$arch.tar.gz"
+    curl -fsSL -o "$tmp_dir/cilium.tar.gz" "$url"
+    tar xzf "$tmp_dir/cilium.tar.gz" -C "$tmp_dir" "cilium"
+    sudo install "$tmp_dir/cilium" /usr/local/bin
+    rm -rf "$tmp_dir"
+    cilium version
+    if [ -d "$BASH_COMPLETION" ]; then
+      sudo sh -c "cilium completion bash > $BASH_COMPLETION/cilium"
+    fi
+    if [ -d "$ZSH_COMPLETIONS" ]; then
+      sudo sh -c "cilium completion zsh > $ZSH_COMPLETIONS/_cilium"
+    fi
+  fi
+}
+
+tools_check_docker() {
+  if tools_install_app "docker"; then
+    tmp_dir="$(mktemp -d)"
+    curl -fsSL -o "$tmp_dir/install-docker.sh" "https://get.docker.com"
+    sh "$tmp_dir/install-docker.sh"
+    rm -rf "$tmp_dir"
+    sudo usermod -aG docker "$(id -un)"
+    docker --version
+  fi
+}
+
+tools_check_helm() {
+  if tools_install_app "helm"; then
+    tmp_dir="$(mktemp -d)"
+    if [ "$GET_HELM_URL" ]; then
+      curl -fsSL -o "$tmp_dir/get_helm.sh" "$GET_HELM_URL"
+      bash "$tmp_dir/get_helm.sh"
+    else
+      os="$(uname -s | tr '[:upper:]' '[:lower:]')"
+      case "$(uname -m)" in
+      x86_64) arch="amd64" ;;
+      esac
+      url="https://get.helm.sh/helm-v$HELM_VERSION-$os-$arch.tar.gz"
+      curl -fsSL -o "$tmp_dir/helm.tar.gz" "$url"
+      tar xzf "$tmp_dir/helm.tar.gz" -C "$tmp_dir" "$os-$arch/helm"
+      sudo install "$tmp_dir/$os-$arch/helm" /usr/local/bin
+    fi
+    rm -rf "$tmp_dir"
+    helm version
+    if [ -d "$BASH_COMPLETION" ]; then
+      sudo sh -c "helm completion bash > $BASH_COMPLETION/helm"
+    fi
+    if [ -d "$ZSH_COMPLETIONS" ]; then
+      sudo sh -c "helm completion zsh > $ZSH_COMPLETIONS/_helm"
+    fi
+  fi
+}
os="$(uname -s | tr '[:upper:]' '[:lower:]')" + case "$(uname -m)" in + x86_64) arch="amd64" ;; + esac + url="https://github.com/cilium/hubble/releases/download" + url="$url/${HUBBLE_VERSION}/hubble-linux-$arch.tar.gz" + curl -fsSL -o "$tmp_dir/hubble.tar.gz" "$url" + tar xzf "$tmp_dir/hubble.tar.gz" -C "$tmp_dir" "hubble" + sudo install "$tmp_dir/hubble" /usr/local/bin + rm -rf "$tmp_dir" + hubble version + if [ -d "$BASH_COMPLETION" ]; then + sudo sh -c "hubble completion bash > $BASH_COMPLETION/hubble" + fi + if [ -d "$ZSH_COMPLETIONS" ]; then + sudo sh -c "hubble completion zsh > $ZSH_COMPLETIONS/_hubble" + fi + fi +} + +tools_check_k3d() { + if tools_install_app "k3d"; then + [ -d /usr/local/bin ] || sudo mkdir /usr/local/bin + curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | + TAG="$K3D_VERSION" bash + k3d version + if [ -d "$BASH_COMPLETION" ]; then + sudo sh -c "k3d completion bash > $BASH_COMPLETION/k3d" + fi + if [ -d "$ZSH_COMPLETIONS" ]; then + sudo sh -c "k3d completion zsh > $ZSH_COMPLETIONS/_k3d" + fi + fi +} + +tools_check_kind() { + if tools_install_app "kind"; then + os="$(uname | tr '[:upper:]' '[:lower:]')" + case "$(uname -m)" in + x86_64) arch="amd64" ;; + esac + url="https://github.com/kubernetes-sigs/kind/releases/download" + url="$url/$KIND_VERSION/kind-$os-$arch" + [ -d /usr/local/bin ] || sudo mkdir /usr/local/bin + tmp_dir="$(mktemp -d)" + curl -fsSL -o "$tmp_dir/kind" "$url" + sudo install "$tmp_dir/kind" /usr/local/bin/ + rm -rf "$tmp_dir" + kind version + if [ -d "$BASH_COMPLETION" ]; then + sudo sh -c "kind completion bash > $BASH_COMPLETION/kind" + fi + if [ -d "$ZSH_COMPLETIONS" ]; then + sudo sh -c "kind completion zsh > $ZSH_COMPLETIONS/_kind" + fi + fi +} + + +tools_check_kubectl() { + if tools_install_app "kubectl"; then + os="$(uname | tr '[:upper:]' '[:lower:]')" + case "$(uname -m)" in + x86_64) arch="amd64" ;; + esac + url="https://dl.k8s.io/release/v$KUBECTL_VERSION/bin/$os/$arch/kubectl" + [ -d /usr/local/bin ] || sudo mkdir /usr/local/bin + tmp_dir="$(mktemp -d)" + curl -fsSL -o "$tmp_dir/kubectl" "$url" + sudo install "$tmp_dir/kubectl" /usr/local/bin/ + rm -rf "$tmp_dir" + kubectl version --client --output=yaml + if [ -d "$BASH_COMPLETION" ]; then + sudo sh -c "kubectl completion bash > $BASH_COMPLETION/kubectl" + fi + if [ -d "$ZSH_COMPLETIONS" ]; then + sudo sh -c "kubectl completion zsh > $ZSH_COMPLETIONS/_kubectl" + fi + fi +} + +tools_check_tmpl() { + if tools_install_app "tmpl"; then + tmp_dir="$(mktemp -d)" + os="$(uname -s | tr '[:upper:]' '[:lower:]')" + case "$(uname -m)" in + x86_64) arch="amd64" ;; + esac + url="https://github.com/krakozaure/tmpl/releases/download" + url="$url/${TMPL_VERSION}/tmpl-linux_$arch" + curl -fsSL -o "$tmp_dir/tmpl" "$url" + sudo install "$tmp_dir/tmpl" /usr/local/bin + rm -rf "$tmp_dir" + fi +} +tools_check() { + for _app in "$@"; do + case "$_app" in + cilium) tools_check_cilium;; + docker) tools_check_docker ;; + helm) tools_check_helm ;; + k3d) tools_check_k3d ;; + kind) tools_check_kind ;; + kubectl) tools_check_kubectl ;; + hubble) tools_check_hubble;; + tmpl) tools_check_tmpl ;; + *) echo "Unknown application '$_app'" ;; + esac + done +} + +tools_apps_list() { + tools="cilium docker helm k3d kind kubectl hubble tmpl" + echo "$tools" +} + +# Usage function +usage() { + cat < +# Copyright: (c) 2023 Sergio Talens-Oliag +# ---- + +set -e + +# --------- +# VARIABLES +# --------- + +HUBBLE_PF="${HUBBLE_PF:-false}" + +# --------- +# FUNCTIONS +# --------- + +usage() { + cat </dev/null 
+# Copyright: (c) 2023 Sergio Talens-Oliag
+# ----
+
+set -e
+
+# ---------
+# VARIABLES
+# ---------
+
+HUBBLE_PF="${HUBBLE_PF:-false}"
+
+# ---------
+# FUNCTIONS
+# ---------
+
+usage() {
+  cat <
/dev/null 1>&2; then
+    echo "Describe current policy"
+    kubectl -n "$NAMESPACE" describe CiliumNetworkPolicy/rule1
+  else
+    echo "Policy not installed"
+  fi
+  ;;
+eps|endpoints) list_sw_endpoints;;
+policy-l34)
+  echo "Adding SW L3-L4 policy"
+  echo ""
+  cat "$SW_L3_L4_POLICY_YAML"
+  echo ""
+  kubectl -n "$NAMESPACE" apply -f "$SW_L3_L4_POLICY_YAML"
+;;
+policy-l7)
+  echo "Adding SW L3-L4-L7 policy:"
+  echo ""
+  cat "$SW_L3_L4_L7_POLICY_YAML"
+  echo ""
+  kubectl -n "$NAMESPACE" apply -f "$SW_L3_L4_L7_POLICY_YAML"
+;;
+policy-none)
+  echo "Removing Cilium Network Policy 'rule1'"
+  kubectl -n "$NAMESPACE" delete CiliumNetworkPolicy/rule1
+;;
+status) status;;
+test)
+  echo "Running access test"
+  access_test
+;;
+"") usage "0" ;;
+*) usage "1" ;;
+esac
+
+# ----
+# vim: ts=2:sw=2:et:ai:sts=2
diff --git a/test/http-sw/http-sw-app.yaml b/test/http-sw/http-sw-app.yaml
new file mode 100644
index 0000000..26593bf
--- /dev/null
+++ b/test/http-sw/http-sw-app.yaml
@@ -0,0 +1,63 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: deathstar
+  labels:
+    app.kubernetes.io/name: deathstar
+spec:
+  type: ClusterIP
+  ports:
+  - port: 80
+  selector:
+    org: empire
+    class: deathstar
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: deathstar
+  labels:
+    app.kubernetes.io/name: deathstar
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      org: empire
+      class: deathstar
+  template:
+    metadata:
+      labels:
+        org: empire
+        class: deathstar
+        app.kubernetes.io/name: deathstar
+    spec:
+      containers:
+      - name: deathstar
+        image: docker.io/cilium/starwars
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: tiefighter
+  labels:
+    org: empire
+    class: tiefighter
+    app.kubernetes.io/name: tiefighter
+spec:
+  containers:
+  - name: spaceship
+    image: docker.io/tgraf/netperf
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: xwing
+  labels:
+    app.kubernetes.io/name: xwing
+    org: alliance
+    class: xwing
+spec:
+  containers:
+  - name: spaceship
+    image: docker.io/tgraf/netperf
diff --git a/test/http-sw/sw_l3_l4_l7_policy.yaml b/test/http-sw/sw_l3_l4_l7_policy.yaml
new file mode 100644
index 0000000..e1ed4c4
--- /dev/null
+++ b/test/http-sw/sw_l3_l4_l7_policy.yaml
@@ -0,0 +1,22 @@
+apiVersion: "cilium.io/v2"
+kind: CiliumNetworkPolicy
+metadata:
+  name: "rule1"
+spec:
+  description: "L7 policy to restrict access to specific HTTP call"
+  endpointSelector:
+    matchLabels:
+      org: empire
+      class: deathstar
+  ingress:
+  - fromEndpoints:
+    - matchLabels:
+        org: empire
+    toPorts:
+    - ports:
+      - port: "80"
+        protocol: TCP
+      rules:
+        http:
+        - method: "POST"
+          path: "/v1/request-landing"
diff --git a/test/http-sw/sw_l3_l4_policy.yaml b/test/http-sw/sw_l3_l4_policy.yaml
new file mode 100644
index 0000000..888b050
--- /dev/null
+++ b/test/http-sw/sw_l3_l4_policy.yaml
@@ -0,0 +1,18 @@
+apiVersion: "cilium.io/v2"
+kind: CiliumNetworkPolicy
+metadata:
+  name: "rule1"
+spec:
+  description: "L3-L4 policy to restrict deathstar access to empire ships only"
+  endpointSelector:
+    matchLabels:
+      org: empire
+      class: deathstar
+  ingress:
+  - fromEndpoints:
+    - matchLabels:
+        org: empire
+    toPorts:
+    - ports:
+      - port: "80"
+        protocol: TCP
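With the demo application deployed, the policies can also be exercised by hand,
as in the upstream cilium Star Wars demo (NS stands for whatever namespace the
script deploys into; its value is lost in the truncated section above):

  kubectl -n NS exec tiefighter -- \
    curl -s -XPOST deathstar.NS.svc.cluster.local/v1/request-landing
  # With rule1 in place the same request from the xwing pod is dropped at
  # L3-L4, so it times out instead of returning 'Ship landed'
  kubectl -n NS exec xwing -- \
    curl -s --max-time 5 -XPOST deathstar.NS.svc.cluster.local/v1/request-landing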
diff --git a/test/ingress-basic.sh b/test/ingress-basic.sh
new file mode 100755
index 0000000..8a3ec72
--- /dev/null
+++ b/test/ingress-basic.sh
@@ -0,0 +1,114 @@
+#!/bin/sh
+# ----
+# File: ingress-basic.sh
+# Description: Script to test the ingress services on our cilium deployments
+# Author: Sergio Talens-Oliag
+# Copyright: (c) 2023 Sergio Talens-Oliag
+# ----
+# REF: https://docs.cilium.io/en/latest/network/servicemesh/http/
+# ----
+
+set -e
+
+# Compute WORK_DIR
+SCRIPT="$(readlink -f "$0")"
+SCRIPT_DIR="$(dirname "$SCRIPT")"
+WORK_DIR_RELPATH="."
+WORK_DIR="$(readlink -f "$SCRIPT_DIR/$WORK_DIR_RELPATH")"
+
+# VARIABLES
+NAMESPACE="ingress-basic"
+YAML_DIR="$WORK_DIR/ingress-basic"
+BOOKINFO_YAML="$YAML_DIR/bookinfo.yaml"
+
+create_deployment() {
+  kubectl create ns "$NAMESPACE" || true
+  kubectl apply -n "$NAMESPACE" -f "$BOOKINFO_YAML"
+  kubectl apply -n "$NAMESPACE" -f "$INGRESS_BASIC_YAML"
+}
+
+delete_deployment() {
+  kubectl delete ns "$NAMESPACE"
+}
+
+wait_for_deployments() {
+  for _deployment in productpage-v1 details-v1; do
+    echo "Waiting for '$_deployment' deployment to be ready"
+    kubectl wait -n "$NAMESPACE" deployment "$_deployment" \
+      --for condition=Available=True --timeout=90s
+  done
+}
+
+wait_for_ingress() {
+  printf "Waiting for the ingress to be ready "
+  while true; do
+    INGRESS="$(
+      kubectl get -n "$NAMESPACE" ingress \
+        -o jsonpath="{.items[0].status.loadBalancer.ingress}"
+    )"
+    if [ -z "$INGRESS" ]; then
+      printf "."
+      sleep 1
+    else
+      echo ". OK"
+      break
+    fi
+  done
+}
+
+print_objects() {
+  kubectl get -n "$NAMESPACE" pods
+  kubectl get -n "$NAMESPACE" svc
+  kubectl get -n "$NAMESPACE" ingress
+  kubectl get -n "$INGRESS_NAMESPACE" "$INGRESS_CONTROLLER"
+}
+
+test_ingress() {
+  HTTP_INGRESS="$(
+    kubectl get -n "$INGRESS_NAMESPACE" "$INGRESS_CONTROLLER" \
+      -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+  )"
+  URL="http://$HTTP_INGRESS/details/1"
+  echo "Testing 'details-v1' service connecting to '$URL'"
+  curl -s --fail "$URL" | jq .
+  URL="http://$HTTP_INGRESS/"
+  echo "Testing 'productpage-v1' service connecting to '$URL' (first 10 lines)"
+  curl -s --fail "$URL" | head -n 10
+}
+
+usage() {
+  echo "Usage: $0 cilium|nginx create|delete|status|test|wait"
+  exit "$1"
+}
+
+# ----
+# MAIN
+# ----
+
+case "$1" in
+cilium)
+  # We assume that the cilium ingress is shared
+  INGRESS_NAMESPACE="kube-system"
+  INGRESS_CONTROLLER="service/cilium-ingress"
+  INGRESS_BASIC_YAML="$YAML_DIR/ingress-basic-cilium.yaml"
+;;
+nginx)
+  INGRESS_NAMESPACE="ingress-nginx"
+  INGRESS_CONTROLLER="service/ingress-nginx-controller"
+  INGRESS_BASIC_YAML="$YAML_DIR/ingress-basic-nginx.yaml"
+;;
+"") usage 0;;
+*) usage 1;;
+esac
+
+case "$2" in
+create) create_deployment;;
+delete) delete_deployment;;
+status) print_objects;;
+test) test_ingress;;
+wait) wait_for_deployments && wait_for_ingress;;
+*) usage 1;;
+esac
+
+# ----
+# vim: ts=2:sw=2:et:ai:sts=2
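For example, to deploy the bookinfo demo behind the cilium ingress and check it
end to end:

  test/ingress-basic.sh cilium create
  test/ingress-basic.sh cilium wait
  test/ingress-basic.sh cilium test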
diff --git a/test/ingress-basic/bookinfo.yaml b/test/ingress-basic/bookinfo.yaml
new file mode 100644
index 0000000..f14a58b
--- /dev/null
+++ b/test/ingress-basic/bookinfo.yaml
@@ -0,0 +1,343 @@
+# Copyright Istio Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##################################################################################################
+# This file defines the services, service accounts, and deployments for the Bookinfo sample.
+#
+# To apply all 4 Bookinfo services, their corresponding service accounts, and deployments:
+#
+#   kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
+#
+# Alternatively, you can deploy any resource separately:
+#
+#   kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l service=reviews # reviews Service
+#   kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l account=reviews # reviews ServiceAccount
+#   kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l app=reviews,version=v3 # reviews-v3 Deployment
+##################################################################################################
+
+##################################################################################################
+# Details service
+##################################################################################################
+apiVersion: v1
+kind: Service
+metadata:
+  name: details
+  labels:
+    app: details
+    service: details
+spec:
+  ports:
+  - port: 9080
+    name: http
+  selector:
+    app: details
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: bookinfo-details
+  labels:
+    account: details
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: details-v1
+  labels:
+    app: details
+    version: v1
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: details
+      version: v1
+  template:
+    metadata:
+      labels:
+        app: details
+        version: v1
+    spec:
+      serviceAccountName: bookinfo-details
+      containers:
+      - name: details
+        image: docker.io/istio/examples-bookinfo-details-v1:1.16.2
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 9080
+        securityContext:
+          runAsUser: 1000
+---
+##################################################################################################
+# Ratings service
+##################################################################################################
+apiVersion: v1
+kind: Service
+metadata:
+  name: ratings
+  labels:
+    app: ratings
+    service: ratings
+spec:
+  ports:
+  - port: 9080
+    name: http
+  selector:
+    app: ratings
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: bookinfo-ratings
+  labels:
+    account: ratings
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ratings-v1
+  labels:
+    app: ratings
+    version: v1
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ratings
+      version: v1
+  template:
+    metadata:
+      labels:
+        app: ratings
+        version: v1
+    spec:
+      serviceAccountName: bookinfo-ratings
+      containers:
+      - name: ratings
+        image: docker.io/istio/examples-bookinfo-ratings-v1:1.16.2
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 9080
+        securityContext:
+          runAsUser: 1000
+---
+##################################################################################################
+# Reviews service
+##################################################################################################
+apiVersion: v1
+kind: Service
+metadata:
+  name: reviews
+  labels:
+    app: reviews
+    service: reviews
+spec:
+  ports:
+  - port: 9080
+    name: http
+  selector:
+    app: reviews
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: bookinfo-reviews
+  labels:
+    account: reviews
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: reviews-v1
+  labels:
+    app: reviews
+    version: v1
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: reviews
+      version: v1
+  template:
+    metadata:
+      labels:
+        app: reviews
+        version: v1
+    spec:
+      serviceAccountName: bookinfo-reviews
+      containers:
+      - name: reviews
+        image: docker.io/istio/examples-bookinfo-reviews-v1:1.16.2
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: LOG_DIR
+          value: "/tmp/logs"
+        ports:
+        - containerPort: 9080
+        volumeMounts:
+        - name: tmp
+          mountPath: /tmp
+        - name: wlp-output
+          mountPath: /opt/ibm/wlp/output
+        securityContext:
+          runAsUser: 1000
+      volumes:
+      - name: wlp-output
+        emptyDir: {}
+      - name: tmp
+        emptyDir: {}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: reviews-v2
+  labels:
+    app: reviews
+    version: v2
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: reviews
+      version: v2
+  template:
+    metadata:
+      labels:
+        app: reviews
+        version: v2
+    spec:
+      serviceAccountName: bookinfo-reviews
+      containers:
+      - name: reviews
+        image: docker.io/istio/examples-bookinfo-reviews-v2:1.16.2
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: LOG_DIR
+          value: "/tmp/logs"
+        ports:
+        - containerPort: 9080
+        volumeMounts:
+        - name: tmp
+          mountPath: /tmp
+        - name: wlp-output
+          mountPath: /opt/ibm/wlp/output
+        securityContext:
+          runAsUser: 1000
+      volumes:
+      - name: wlp-output
+        emptyDir: {}
+      - name: tmp
+        emptyDir: {}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: reviews-v3
+  labels:
+    app: reviews
+    version: v3
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: reviews
+      version: v3
+  template:
+    metadata:
+      labels:
+        app: reviews
+        version: v3
+    spec:
+      serviceAccountName: bookinfo-reviews
+      containers:
+      - name: reviews
+        image: docker.io/istio/examples-bookinfo-reviews-v3:1.16.2
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: LOG_DIR
+          value: "/tmp/logs"
+        ports:
+        - containerPort: 9080
+        volumeMounts:
+        - name: tmp
+          mountPath: /tmp
+        - name: wlp-output
+          mountPath: /opt/ibm/wlp/output
+        securityContext:
+          runAsUser: 1000
+      volumes:
+      - name: wlp-output
+        emptyDir: {}
+      - name: tmp
+        emptyDir: {}
+---
+##################################################################################################
+# Productpage services
+##################################################################################################
+apiVersion: v1
+kind: Service
+metadata:
+  name: productpage
+  labels:
+    app: productpage
+    service: productpage
+spec:
+  ports:
+  - port: 9080
+    name: http
+  selector:
+    app: productpage
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: bookinfo-productpage
+  labels:
+    account: productpage
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: productpage-v1
+  labels:
+    app: productpage
+    version: v1
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: productpage
+      version: v1
+  template:
+    metadata:
+      labels:
+        app: productpage
+        version: v1
+    spec:
+      serviceAccountName: bookinfo-productpage
+      containers:
+      - name: productpage
+        image: docker.io/istio/examples-bookinfo-productpage-v1:1.16.2
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 9080
+        volumeMounts:
+        - name: tmp
+          mountPath: /tmp
+        securityContext:
+          runAsUser: 1000
+      volumes:
+      - name: tmp
+        emptyDir: {}
+---
diff --git a/test/ingress-basic/ingress-basic-cilium.yaml b/test/ingress-basic/ingress-basic-cilium.yaml
new file mode 100644
index 0000000..9a88c5e
--- /dev/null
+++ b/test/ingress-basic/ingress-basic-cilium.yaml
@@ -0,0 +1,25 @@
+# Basic ingress for the istio bookinfo demo application, which can be found at
+# https://raw.githubusercontent.com/istio/istio/release-1.11/samples/bookinfo/platform/kube/bookinfo.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: ingress-basic-cilium
+spec:
+  ingressClassName: cilium
+  rules:
+  - http:
+      paths:
+      - backend:
+          service:
+            name: details
+            port:
+              number: 9080
+        path: /details
+        pathType: Prefix
+      - backend:
+          service:
+            name: productpage
+            port:
+              number: 9080
+        path: /
+        pathType: Prefix
diff --git a/test/ingress-basic/ingress-basic-nginx.yaml b/test/ingress-basic/ingress-basic-nginx.yaml
new file mode 100644
index 0000000..0b9b563
--- /dev/null
+++ b/test/ingress-basic/ingress-basic-nginx.yaml
@@ -0,0 +1,25 @@
+# Basic ingress for the istio bookinfo demo application, which can be found at
+# https://raw.githubusercontent.com/istio/istio/release-1.11/samples/bookinfo/platform/kube/bookinfo.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: ingress-basic-nginx
+spec:
+  ingressClassName: nginx
+  rules:
+  - http:
+      paths:
+      - backend:
+          service:
+            name: details
+            port:
+              number: 9080
+        path: /details
+        pathType: Prefix
+      - backend:
+          service:
+            name: productpage
+            port:
+              number: 9080
+        path: /
+        pathType: Prefix
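The two manifests differ only in their name and ingressClassName; to confirm
which classes exist on a cluster and which load balancer address each
controller was given:

  kubectl get ingressclass
  kubectl -n kube-system get service/cilium-ingress
  kubectl -n ingress-nginx get service/ingress-nginx-controller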
diff --git a/test/mesh-test.sh b/test/mesh-test.sh
new file mode 100755
index 0000000..afc9115
--- /dev/null
+++ b/test/mesh-test.sh
@@ -0,0 +1,254 @@
+#!/bin/sh
+# ----
+# File: mesh-test.sh
+# Description: Script to test the cluster mesh on our cilium deployments
+# Author: Sergio Talens-Oliag
+# Copyright: (c) 2023 Sergio Talens-Oliag
+# ----
+# REF: https://docs.cilium.io/en/stable/network/clustermesh/services/
+# ----
+
+set -e
+
+# Compute WORK_DIR
+SCRIPT="$(readlink -f "$0")"
+SCRIPT_DIR="$(dirname "$SCRIPT")"
+WORK_DIR_RELPATH="."
+WORK_DIR="$(readlink -f "$SCRIPT_DIR/$WORK_DIR_RELPATH")"
+
+# VARIABLES
+NAMESPACE="mesh-test"
+SERVICE="svc/rebel-base"
+DEPLOYMENT_RB="deployment/rebel-base"
+DEPLOYMENT_XW="deployment/x-wing"
+YAML_DIR="$WORK_DIR/mesh-test"
+GSC1_YAML="$YAML_DIR/cluster1.yaml"
+GSC2_YAML="$YAML_DIR/cluster2.yaml"
+ACCESS_TEST_LOOPS="7"
+
+access_test() {
+  for ctx in "$CTX1" "$CTX2"; do
+    echo "Running $ACCESS_TEST_LOOPS tests from '$ctx'"
+    counter=0
+    while [ "$counter" -lt "$ACCESS_TEST_LOOPS" ]; do
+      kubectl --context "$ctx" -n "$NAMESPACE" exec -ti "$DEPLOYMENT_XW" \
+        -- curl rebel-base
+      counter="$((counter + 1))"
+    done
+  done
+}
+
+create() {
+  for cn in "1" "2"; do
+    echo "Creating Global Service on Cluster $cn"
+    create_namespace "$cn"
+    deploy_objects "$cn"
+  done
+}
+
+create_namespace() {
+  case "$1" in
+  1) ctx="$CTX1";;
+  2) ctx="$CTX2";;
+  *) echo "Unknown cluster number '$1'"; exit 1;;
+  esac
+  kubectl --context="$ctx" create ns "$NAMESPACE" || true
+}
+
+deploy_objects() {
+  case "$1" in
+  1) ctx="$CTX1"; yaml="$GSC1_YAML";;
+  2) ctx="$CTX2"; yaml="$GSC2_YAML";;
+  *) echo "Unknown cluster number '$1'"; exit 1;;
+  esac
+  sed -e "s/Cluster-/$CTOOL-cluster-/" "$yaml" |
+    kubectl --context="$ctx" -n "$NAMESPACE" apply -f -
+}
+
+delete() {
+  for cn in "1" "2"; do
+    echo "Deleting Global Service on Cluster $cn"
+    delete_objects "$cn" || true
+    delete_namespace "$cn"
+  done
+}
+
+delete_deployment() {
+  case "$1" in
+  1) ctx="$CTX1";;
+  2) ctx="$CTX2";;
+  *) echo "Unknown cluster number '$1'"; exit 1;;
+  esac
+  echo "Deleting '$DEPLOYMENT_RB' on Cluster $1"
+  kubectl --context="$ctx" -n "$NAMESPACE" delete "$DEPLOYMENT_RB" || true
+}
+
+delete_namespace() {
+  case "$1" in
+  1) ctx="$CTX1";;
+  2) ctx="$CTX2";;
+  *) echo "Unknown cluster number '$1'"; exit 1;;
+  esac
+  kubectl --context="$ctx" delete ns "$NAMESPACE" || true
+}
+
+delete_objects() {
+  case "$1" in
+  1) ctx="$CTX1"; yaml="$GSC1_YAML";;
+  2) ctx="$CTX2"; yaml="$GSC2_YAML";;
+  *) echo "Unknown cluster number '$1'"; exit 1;;
+  esac
+  sed -e "s/Cluster-/$CTOOL-cluster-/" "$yaml" |
+    kubectl --context="$ctx" -n "$NAMESPACE" delete -f -
+}
+
+get_cilium_annotations() {
+  for ctx in "$CTX1" "$CTX2"; do
+    echo "Service '$SERVICE' cilium annotations on '$ctx'"
+    kubectl --context "$ctx" -n "$NAMESPACE" get "$SERVICE" -o yaml |
+      sed -ne 's/^    service.cilium.io/- service.cilium.io/p'
+  done
+}
+
+status() {
+  for ctx in "$CTX1" "$CTX2"; do
+    echo "Mesh test status on '$ctx'"
+    echo ""
+    kubectl --context "$ctx" -n "$NAMESPACE" get all
+    echo ""
+  done
+}
+
+wait_for_deployments() {
+  for ctx in "$CTX1" "$CTX2"; do
+    for _deployment in "$DEPLOYMENT_RB" "$DEPLOYMENT_XW"; do
+      echo "Waiting for '$_deployment' to be ready on '$ctx'"
+      kubectl wait --context="$ctx" -n "$NAMESPACE" "$_deployment" \
+        --for condition=Available=True --timeout=90s
+    done
+  done
+}
+
+service_affinity_default() {
+  kubectl --context="$1" -n "$NAMESPACE" annotate "$SERVICE" \
+    service.cilium.io/affinity-
+}
+
+service_affinity_local() {
+  kubectl --context="$1" -n "$NAMESPACE" annotate "$SERVICE" \
+    service.cilium.io/affinity="local" --overwrite
+}
+
+service_affinity_none() {
+  kubectl --context="$1" -n "$NAMESPACE" annotate "$SERVICE" \
+    service.cilium.io/affinity="none" --overwrite
+}
+
+service_affinity_remote() {
+  kubectl --context="$1" -n "$NAMESPACE" annotate "$SERVICE" \
+    service.cilium.io/affinity="remote" --overwrite
+}
+
+service_shared_default() {
+  case "$1" in
+  1) ctx="$CTX1";;
+  2) ctx="$CTX2";;
+  *) echo "Unknown cluster number '$1'"; exit 1;;
+  esac
+  kubectl --context="$ctx" -n "$NAMESPACE" annotate "$SERVICE" \
+    service.cilium.io/shared-
+}
+
+service_shared_false() {
+  case "$1" in
+  1) ctx="$CTX1";;
+  2) ctx="$CTX2";;
+  *) echo "Unknown cluster number '$1'"; exit 1;;
+  esac
+  kubectl --context="$ctx" -n "$NAMESPACE" annotate "$SERVICE" \
+    service.cilium.io/shared="false" --overwrite
+}
+
+service_shared_true() {
+  case "$1" in
+  1) ctx="$CTX1";;
+  2) ctx="$CTX2";;
+  *) echo "Unknown cluster number '$1'"; exit 1;;
+  esac
+  kubectl --context="$ctx" -n "$NAMESPACE" annotate "$SERVICE" \
+    service.cilium.io/shared="true" --overwrite
+}
+
+usage() {
+  cat <
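The usage heredoc and the MAIN section are cut off above, but the annotation
helpers can be exercised directly with kubectl; for example, to stop sharing
the cluster 1 service with the mesh (the context name assumes the k3d-cilium1
naming convention used by the other scripts in this repository):

  kubectl --context k3d-cilium1 -n mesh-test annotate svc/rebel-base \
    service.cilium.io/shared="false" --overwrite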