Initial commit
This commit is contained in:
commit
a267b572a3
23 changed files with 2173 additions and 0 deletions
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
yaml/
|
7
README.md
Normal file
7
README.md
Normal file
|
@ -0,0 +1,7 @@
|
|||
# Testing cilium with k3d and kind
|
||||
|
||||
This repository contains scripts and templates to test cilium on a Linux server
|
||||
using `k3d` or `kind` with `docker`.
|
||||
|
||||
The documentation about how to use them is available on this [blog
|
||||
post](https://blogops.mixinet.net/posts/testing_cilium_with_k3d_and_kind/).
|
18
bin/k3d-entrypoint-cilium.sh
Executable file
18
bin/k3d-entrypoint-cilium.sh
Executable file
|
@ -0,0 +1,18 @@
|
|||
#!/bin/sh
# ----
# File: k3d-entrypoint-cilium.sh
# Description: Script to be run on k3d clusters to be able to use cilium
# Author: Sergio Talens-Oliag <sto@mixinet.net>
# Copyright: (c) 2023 Sergio Talens-Oliag <sto@mixinet.net>
# ----

set -e

# The cilium agent needs the BPF filesystem mounted and shared on every node
# so its container can see the maps created on the host.
echo "Mounting bpf on node"
mount bpffs -t bpf /sys/fs/bpf
mount --make-shared /sys/fs/bpf

# Cilium expects a cgroup v2 hierarchy mounted (and shared) under
# /run/cilium/cgroupv2 on each node.
echo "Mounting cgroups v2 to /run/cilium/cgroupv2 on node"
mkdir -p /run/cilium/cgroupv2
mount -t cgroup2 none /run/cilium/cgroupv2
mount --make-shared /run/cilium/cgroupv2/
|
362
sbin/cilium-install.sh
Executable file
362
sbin/cilium-install.sh
Executable file
|
@ -0,0 +1,362 @@
|
|||
#!/bin/sh
# ----
# File: cilium-install.sh
# Description: Tool to install k8s cilium test clusters using k3d or kind
# Author: Sergio Talens-Oliag <sto@mixinet.net>
# Copyright: (c) 2023 Sergio Talens-Oliag <sto@mixinet.net>
# ----

set -e

# Compute WORK_DIR
# The script lives in <work_dir>/sbin, so the work dir is its parent;
# templates are read from <work_dir>/tmpl and downloaded manifests are
# cached in <work_dir>/yaml.
SCRIPT="$(readlink -f "$0")"
SCRIPT_DIR="$(dirname "$SCRIPT")"
WORK_DIR_RELPATH=".."
WORK_DIR="$(readlink -f "$SCRIPT_DIR/$WORK_DIR_RELPATH")"
TMPL_DIR="$WORK_DIR/tmpl"
YAML_DIR="$WORK_DIR/yaml"
|
||||
|
||||
# ---------
# VARIABLES
# ---------

# Feature switches (overridable from the environment).
GATEWAY_API_ENABLED="${GATEWAY_API_ENABLED:-false}"
INGRESS_CONTROLLER_DEFAULT="${INGRESS_CONTROLLER_DEFAULT:-false}"
INGRESS_CONTROLLER_ENABLED="${INGRESS_CONTROLLER_ENABLED:-false}"
LOADBALANCER_MODE="shared"

# Cilium tunnel mode passed to the chart values template.
TUNNEL="vxlan"

# Per-tool docker network names and address prefixes; k3d and kind use
# disjoint ranges so both setups can coexist on the same host.
K3D_NETWORK_NAME="cilium"
K3D_NET_PREFIX="172.30"
K3D_CLUSTER_SUBNET_PREFIX="10.1"
K3D_SERVICE_SUBNET_PREFIX="10.10"
KIND_NETWORK_NAME="kind"
KIND_NET_PREFIX="172.31"
KIND_CLUSTER_SUBNET_PREFIX="10.2"
KIND_SERVICE_SUBNET_PREFIX="10.20"

NETWORK_TYPE="bridge"

# metallb manifest (pinned version, cached under $YAML_DIR).
METALLB_ENABLED="true"
METALLB_BASE_URL="https://raw.githubusercontent.com/metallb/metallb"
METALLB_VERSION="v0.13.9"
METALLB_DEPLOY_YAML="config/manifests/metallb-native.yaml"
METALLB_YAML_URL="$METALLB_BASE_URL/$METALLB_VERSION/$METALLB_DEPLOY_YAML"
METALLB_YAML="$YAML_DIR/metallb-native.yaml"

# ingress-nginx manifest (pinned version, cached under $YAML_DIR).
NGINX_IC_ENABLED="true"
NGINX_IC_BASE_URL="https://raw.githubusercontent.com/kubernetes/ingress-nginx"
NGINX_IC_VERSION="controller-v1.7.0"
NGINX_IC_DEPLOY_YAML="deploy/static/provider/cloud/deploy.yaml"
NGINX_IC_YAML_URL="$NGINX_IC_BASE_URL/$NGINX_IC_VERSION/$NGINX_IC_DEPLOY_YAML"
NGINX_IC_YAML="$YAML_DIR/ingress-nginx-deploy.yaml"

# GOTMPLs
TMPL_K3D_CONFIG_YAML="$TMPL_DIR/k3d-config.yaml"
TMPL_KIND_CONFIG_YAML="$TMPL_DIR/kind-config.yaml"
TMPL_IPPOOLS_YAML="$TMPL_DIR/ippools.yaml"
TMPL_CILIUM_YAML="$TMPL_DIR/cilium.yaml"
TMPL_METALLB_CRDS_YAML="$TMPL_DIR/metallb-crds.yaml"

# Adjust variables based on other variables
# The cilium BGP control plane is only enabled when metallb is not used.
if [ "$METALLB_ENABLED" = "true" ]; then
  BGP_CONTROL_PLANE_ENABLED="false"
else
  BGP_CONTROL_PLANE_ENABLED="true"
fi
|
||||
|
||||
# ---------
|
||||
# FUNCTIONS
|
||||
# ---------
|
||||
|
||||
create_network() {
  # Create the docker bridge network shared by the test clusters, unless a
  # network with that name already exists.
  NETWORK_ID="$(
    docker network inspect "$NETWORK_NAME" --format "{{.Id}}" 2>/dev/null
  )" || true
  if [ -z "$NETWORK_ID" ]; then
    echo "Creating network '$NETWORK_NAME' in docker"
    docker network create \
      --driver "$NETWORK_TYPE" \
      --subnet "$NETWORK_SUBNET" \
      --gateway "$NETWORK_GATEWAY" \
      --ip-range "$NETWORK_IP_RANGE" \
      "$NETWORK_NAME"
  else
    echo "Using existing network '$NETWORK_NAME' with id '$NETWORK_ID'"
  fi
}
|
||||
|
||||
create_cluster() {
  # Render the per-tool cluster configuration template and feed it on stdin
  # to the matching cluster-creation command, then print the cluster info.
  echo "Creating $CTOOL cluster '$CNAME'"
  case "$CTOOL" in
  k3d) _cfg_tmpl="$TMPL_K3D_CONFIG_YAML" ;;
  kind) _cfg_tmpl="$TMPL_KIND_CONFIG_YAML" ;;
  esac
  # Both tools take the same template variables; render once.
  _cfg="$(
    tmpl \
      -v "cnum=$CNUM" \
      -v "cname=$CNAME" \
      -v "host_ip=$HOST_IP" \
      -v "cluster_subnet=$CLUSTER_SUBNET" \
      -v "service_subnet=$SERVICE_SUBNET" \
      -v "work_dir=$WORK_DIR" \
      "$_cfg_tmpl"
  )"
  case "$CTOOL" in
  k3d) printf '%s\n' "$_cfg" | k3d cluster create -c - ;;
  kind) printf '%s\n' "$_cfg" | kind create cluster --config="-" ;;
  esac
  echo "Cluster '$CNAME' info"
  kubectl --context "$CTX" cluster-info
}
|
||||
|
||||
install_gateway_api_crds() {
  # Apply the GatewayAPI v0.5.1 CRDs that cilium's gateway support requires.
  BASE_URL="https://raw.githubusercontent.com/kubernetes-sigs/gateway-api"
  BASE_URL="$BASE_URL/v0.5.1/config/crd"
  echo "Installing GatewayAPI CRDs"
  _crd_list="standard/gateway.networking.k8s.io_gatewayclasses.yaml
standard/gateway.networking.k8s.io_gateways.yaml
standard/gateway.networking.k8s.io_httproutes.yaml
experimental/gateway.networking.k8s.io_referencegrants.yaml"
  for crd_yaml in $_crd_list; do
    kubectl --context "$CTX" apply -f "$BASE_URL/$crd_yaml"
  done
}
|
||||
|
||||
cilium_status() {
  # Block until cilium reports a healthy status on the current context.
  echo "Checking cilium status"
  cilium status --wait --context "$CTX"
}
|
||||
|
||||
master_node_ip() {
  # If we are not running kube-proxy the cilium Pods can't reach the api
  # server because the in-cluster service can't be reached; to work around
  # it we print the internal IP of the master node container (column 6 of
  # the wide node listing) so the pods can reach the api server directly.
  if [ "$CTOOL" = "k3d" ]; then
    MASTER_NODE="node/$CTX-server-0"
  elif [ "$CTOOL" = "kind" ]; then
    MASTER_NODE="node/$CNAME-control-plane"
  else
    echo "Unknown master node"
    exit 1
  fi
  kubectl --context "$CTX" get "$MASTER_NODE" -o wide --no-headers |
    awk '{ print $6 }'
}
|
||||
|
||||
cilium_cli_install() {
  # Install cilium using the cilium CLI, rendering the helm values from the
  # cilium.yaml template with the per-cluster settings, then enable hubble.
  if [ "$GATEWAY_API_ENABLED" = "true" ]; then
    install_gateway_api_crds
  fi
  _xtra_args=""
  if [ "$CNUM" = "2" ]; then
    # FIX: the previous value hardcoded the 'kind-' context prefix, which is
    # wrong when CTOOL is 'k3d'; build the context from $CTOOL instead
    # (matching what cilium_helm_install does).
    _xtra_args="--inherit-ca $CTOOL-cilium1"
  fi
  MASTER_NODE_IP="$(master_node_ip)"
  # shellcheck disable=SC2086
  tmpl \
    -v "master_node_ip=$MASTER_NODE_IP" \
    -v "cnum=$CNUM" \
    -v "cname=$CNAME" \
    -v "bgp_control_plane_enabled=$BGP_CONTROL_PLANE_ENABLED" \
    -v "gateway_api_enabled=$GATEWAY_API_ENABLED" \
    -v "ingress_controller_default=$INGRESS_CONTROLLER_DEFAULT" \
    -v "ingress_controller_enabled=$INGRESS_CONTROLLER_ENABLED" \
    -v "loadbalancer_mode=$LOADBALANCER_MODE" \
    -v "tunnel=$TUNNEL" \
    "$TMPL_CILIUM_YAML" |
    cilium install --context "$CTX" --helm-values - $_xtra_args
  # Wait for the deployment
  cilium_status
  echo "Enabling hubble"
  cilium hubble enable --ui --context "$CTX"
}
|
||||
|
||||
cilium_helm_install() {
  # Install cilium with helm, rendering the chart values from the cilium.yaml
  # template with the per-cluster settings.
  if [ "$GATEWAY_API_ENABLED" = "true" ]; then
    install_gateway_api_crds
  fi
  # Repo may already be registered; ignore the error in that case.
  helm repo add cilium https://helm.cilium.io/ >/dev/null || true
  # Copy the cilium-ca to the second cluster
  # (both clusters must share the CA for cluster-mesh to work).
  if [ "$CNUM" = "2" ]; then
    echo "Copying the 'cilium-ca' from '$CTOOL-cilium1' to '$CTX'"
    kubectl --context "$CTOOL-cilium1" -n kube-system get secrets/cilium-ca \
      -o yaml | kubectl apply --context "$CTX" -f -
  fi
  MASTER_NODE_IP="$(master_node_ip)"
  # shellcheck disable=SC2086
  tmpl \
    -v "master_node_ip=$MASTER_NODE_IP" \
    -v "cnum=$CNUM" \
    -v "cname=$CNAME" \
    -v "bgp_control_plane_enabled=$BGP_CONTROL_PLANE_ENABLED" \
    -v "gateway_api_enabled=$GATEWAY_API_ENABLED" \
    -v "ingress_controller_default=$INGRESS_CONTROLLER_DEFAULT" \
    -v "ingress_controller_enabled=$INGRESS_CONTROLLER_ENABLED" \
    -v "loadbalancer_mode=$LOADBALANCER_MODE" \
    -v "tunnel=$TUNNEL" \
    "$TMPL_CILIUM_YAML" |
    helm upgrade --install cilium cilium/cilium --version 1.13.1 \
      --kube-context "$CTX" --namespace=kube-system --values=-
}

cilium_install(){
  # Install cilium (helm based install) and wait until it reports healthy.
  echo "Installing cilium in cluster '$CNAME'"
  cilium_helm_install
  cilium_status
}
|
||||
|
||||
lb_download_yaml() {
  # Download the pinned metallb manifest into the local yaml cache dir.
  if [ ! -d "$YAML_DIR" ]; then
    mkdir "$YAML_DIR"
  fi
  curl -fsSL -o "$METALLB_YAML" "$METALLB_YAML_URL"
}
|
||||
|
||||
lb_install() {
  # Install a LoadBalancer implementation: metallb when METALLB_ENABLED,
  # otherwise cilium LB IPAM pools when the BGP control plane is in use.
  if [ "$METALLB_ENABLED" = "true" ]; then
    if [ ! -f "$METALLB_YAML" ]; then
      lb_download_yaml
    fi
    # FIX: this path runs for both k3d and kind; the old message always
    # said 'kind cluster'. Use $CTOOL so the output matches reality.
    echo "Installing metallb on $CTOOL cluster '$CNAME'"
    kubectl --context "$CTX" apply -f "$METALLB_YAML"
    echo "Waiting for metallb to be ready"
    kubectl --context "$CTX" rollout status deployment --timeout="120s" \
      -n "metallb-system" "controller"
    echo "Configuring metallb"
    tmpl -v "lb_pool_range=$LB_POOL_RANGE" "$TMPL_METALLB_CRDS_YAML" |
      kubectl --context "$CTX" apply -f -
  elif [ "$BGP_CONTROL_PLANE_ENABLED" = "true" ]; then
    echo "Adding LB IPAM Pools"
    tmpl -v "lb_pool_cdir=$LB_POOL_CDIR" "$TMPL_IPPOOLS_YAML" |
      kubectl --context "$CTX" apply -f -
  fi
}
|
||||
|
||||
ingress_download_yaml() {
  # Download the pinned ingress-nginx manifest into the local yaml cache dir.
  if [ ! -d "$YAML_DIR" ]; then
    mkdir "$YAML_DIR"
  fi
  curl -fsSL -o "$NGINX_IC_YAML" "$NGINX_IC_YAML_URL"
}
|
||||
|
||||
ingress_install() {
  # Deploy the nginx ingress controller (when enabled) and wait until its
  # controller pod is ready.
  if [ "$NGINX_IC_ENABLED" = "true" ]; then
    if [ ! -f "$NGINX_IC_YAML" ]; then
      ingress_download_yaml
    fi
    # FIX: this path runs for both k3d and kind; the old message always
    # said 'kind cluster'. Use $CTOOL so the output matches reality.
    echo "Installing nginx ingress controller on $CTOOL cluster '$CNAME'"
    kubectl --context "$CTX" apply -f "$NGINX_IC_YAML"
    echo "Waiting for the nginx controller to be ready"
    kubectl --context "$CTX" wait --namespace ingress-nginx \
      --for=condition=ready pod \
      --selector=app.kubernetes.io/component=controller \
      --timeout=120s
  fi
}
|
||||
|
||||
mesh_install() {
  # Enable cluster-mesh on this cluster; once the second cluster runs it,
  # connect the two meshes (only done from cluster number 2).
  echo "Enabling cluster-mesh on cluster '$CNAME'"
  cilium clustermesh enable --context "$CTX" --service-type LoadBalancer
  echo "Checking cilium status on cluster '$CNAME'"
  cilium status --context "$CTX" --wait
  if [ "$CNUM" -eq "2" ]; then
    echo "Connecting cluster"
    cilium clustermesh connect --context "$CTOOL-cilium1" \
      --destination-context "$CTOOL-cilium2"
    echo "Checking cilium status on cluster '$CNAME'"
    cilium status --context "$CTX" --wait
  fi
}
|
||||
|
||||
# Print usage and exit with the status passed in $1.
usage() {
  cat <<EOF
Usage: $0 CTOOL CLUSTER [OPERATION]

- CTOOL is 'k3d' or 'kind'
- CLUSTER is '1' or '2'
- OPERATION is one of:
- 'base' (== 'network,cluster,cilium,lb,ingress')
- 'full' (== 'base,mesh')
- 'network'
- 'cluster'
- 'cilium'
- 'lb'
- 'lb-yaml'
- 'ingress'
- 'ingress-yaml'
- 'mesh'
- 'status'
If missing the default OPERATION is 'base'

EOF
  exit "$1"
}

# ====
# MAIN
# ====

CTOOL="$1"
CNUM="$2"
ACTION="$3"

# Select the network name and address prefixes for the chosen tool.
case "$CTOOL" in
k3d)
  NETWORK_NAME="$K3D_NETWORK_NAME"
  NET_PREFIX="$K3D_NET_PREFIX"
  CLUSTER_SUBNET_PREFIX="$K3D_CLUSTER_SUBNET_PREFIX"
  SERVICE_SUBNET_PREFIX="$K3D_SERVICE_SUBNET_PREFIX"
  ;;
kind)
  NETWORK_NAME="$KIND_NETWORK_NAME"
  NET_PREFIX="$KIND_NET_PREFIX"
  CLUSTER_SUBNET_PREFIX="$KIND_CLUSTER_SUBNET_PREFIX"
  SERVICE_SUBNET_PREFIX="$KIND_SERVICE_SUBNET_PREFIX"
  ;;
*) usage 1;;
esac
# Only cluster numbers 1 and 2 are supported.
case "$CNUM" in
1|2) ;;
*) usage 1 ;;
esac

# Adjust variables based on the input arguments
CNAME="cilium$CNUM"
CTX="$CTOOL-$CNAME"
# NOTE(review): HOST_IP lands in 127.x.y.z (loopback) space — presumably a
# per-cluster loopback alias consumed by the config templates; confirm
# against tmpl/k3d-config.yaml and tmpl/kind-config.yaml.
HOST_IP="127.$NET_PREFIX.$CNUM"
# The prefix and $CNUM concatenate, e.g. '10.1' + '1' -> 10.11.0.0/16.
CLUSTER_SUBNET="$CLUSTER_SUBNET_PREFIX$CNUM.0.0/16"
SERVICE_SUBNET="$SERVICE_SUBNET_PREFIX$CNUM.0.0/16"
NETWORK_SUBNET="$NET_PREFIX.0.0/16"
NETWORK_GATEWAY="$NET_PREFIX.0.1"
NETWORK_IP_RANGE="$NET_PREFIX.0.0/17"
# LoadBalancer address pool: one /24 per cluster (x.y.201.0 / x.y.202.0).
LB_POOL_CDIR="$NET_PREFIX.20$CNUM.0/24"
LB_POOL_RANGE="$NET_PREFIX.20$CNUM.1-$NET_PREFIX.20$CNUM.254"

# Dispatch the requested operation (default: 'base').
case "$ACTION" in
base|"")
  create_network
  create_cluster
  cilium_install
  lb_install
  ingress_install
  ;;
full)
  create_network
  create_cluster
  cilium_install
  lb_install
  ingress_install
  mesh_install
  ;;
network) create_network ;;
cluster) create_cluster ;;
cilium) cilium_install;;
lb) lb_install ;;
lb-yaml) lb_download_yaml ;;
ingress) ingress_install;;
ingress-yaml) ingress_download_yaml;;
status) cilium_status;;
mesh) mesh_install;;
*) usage 1;;
esac
|
91
sbin/cilium-remove.sh
Executable file
91
sbin/cilium-remove.sh
Executable file
|
@ -0,0 +1,91 @@
|
|||
#!/bin/sh
# ----
# File: cilium-remove.sh
# Description: Tool to remove k8s cilium test clusters using k3d or kind
# Author: Sergio Talens-Oliag <sto@mixinet.net>
# Copyright: (c) 2023 Sergio Talens-Oliag <sto@mixinet.net>
# ----

set -e

# ---------
# VARIABLES
# ---------
# Docker network names used by each tool (must match cilium-install.sh).
K3D_NETWORK_NAME="cilium"
KIND_NETWORK_NAME="kind"
|
||||
|
||||
# ---------
|
||||
# FUNCTIONS
|
||||
# ---------
|
||||
|
||||
delete_network() {
  # Remove the docker network used by the test clusters, if it exists.
  NETWORK_ID="$(
    docker network inspect "$NETWORK_NAME" --format "{{.Id}}" 2>/dev/null
  )" || true
  if [ -z "$NETWORK_ID" ]; then
    echo "Network '$NETWORK_NAME' not found in docker"
  else
    echo "Removing network '$NETWORK_NAME' with id '$NETWORK_ID'"
    docker network rm "$NETWORK_NAME"
  fi
}
|
||||
|
||||
delete_cluster() {
  # Delete the cluster with whichever tool created it.
  if [ "$CTOOL" = "k3d" ]; then
    echo "Deleting k3d cluster '$CNAME'"
    k3d cluster delete "$CNAME"
  elif [ "$CTOOL" = "kind" ]; then
    echo "Deleting kind cluster '$CNAME'"
    kind delete cluster -n "$CNAME"
  fi
}
|
||||
|
||||
# Print usage and exit with the status passed in $1.
usage() {
  cat <<EOF
Usage: $0 CTOOL CLUSTER [OPERATION]

Where:

- CTOOL is 'k3d' or 'kind'
- CLUSTER is '1' or '2'
- OPERATION is one of:
- 'all'
- 'network'
- 'cluster'
If missing the default OPERATION is 'all'
EOF
  exit "$1"
}

# ====
# MAIN
# ====

CTOOL="$1"
CNUM="$2"
ACTION="${3:-all}"

# Select the docker network matching the chosen tool.
case "$CTOOL" in
k3d) NETWORK_NAME="$K3D_NETWORK_NAME" ;;
kind) NETWORK_NAME="$KIND_NETWORK_NAME" ;;
*) usage 1;;
esac

# Only cluster numbers 1 and 2 are supported.
case "$CNUM" in
1|2) ;;
*) usage 1 ;;
esac
CNAME="cilium$CNUM"

# Dispatch the requested operation (default: 'all').
case "$ACTION" in
all|"")
  delete_cluster
  delete_network
  ;;
cluster) delete_cluster ;;
network) delete_network ;;
*) usage 1;;
esac
|
318
sbin/tools.sh
Executable file
318
sbin/tools.sh
Executable file
|
@ -0,0 +1,318 @@
|
|||
#!/bin/sh
# ----
# File: tools.sh
# Description: Tool to check and install tools used on this repo
# Author: Sergio Talens-Oliag <sto@mixinet.net>
# Copyright: (c) 2023 Sergio Talens-Oliag <sto@mixinet.net>
# ----

set -e

# ---------
# Variables
# ---------

# System dirs
# Destination directories for the generated shell completion files.
BASH_COMPLETION="/etc/bash_completion.d"
ZSH_COMPLETIONS="/usr/share/zsh/vendor-completions"

# Terminal related variables
# Use bold text in prompts only when a terminal with tput is available.
if [ "$TERM" ] && type tput >/dev/null; then
  bold="$(tput bold)"
  normal="$(tput sgr0)"
else
  bold=""
  normal=""
fi
export yes_no="(${bold}Y${normal}es/${bold}N${normal}o)"

# Versions
# Pinned tool versions; each one is overridable from the environment.
CILIUM_CLI_VERSION="${CILIUM_CLI_VERSION:-v0.13.2}"
# Uncomment to get the latest helm version
# GET_HELM_URL="https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3"
HELM_VERSION="${HELM_VERSION:-3.11.2}"
HUBBLE_VERSION="${HUBBLE_VERSION:-v0.11.3}"
K3D_VERSION="${K3D_VERSION:-v5.4.9}"
KIND_VERSION="${KIND_VERSION:-v0.18.0}"
KUBECTL_VERSION="${KUBECTL_VERSION:-1.26.3}"
TMPL_VERSION="${TMPL_VERSION:-v0.4.0}"
|
||||
|
||||
# ---------
|
||||
# Functions
|
||||
# ---------
|
||||
|
||||
# Auxiliary function to read a boolean value. $1 text to show - $2 default value
read_bool() {
  # Normalize the default ($2) to a Yes/No label for the prompt.
  case "${2}" in
  y | Y | yes | Yes | YES | true | True | TRUE) _yn="Yes" ;;
  *) _yn="No" ;;
  esac
  printf "%s ${yes_no} [%s]: " "${1}" "${bold}${_yn}${normal}"
  read -r READ_VALUE
  # An empty answer keeps the default; any other answer maps to
  # "true"/"false". The result is returned in the global READ_VALUE.
  case "${READ_VALUE}" in
  '') [ "$_yn" = "Yes" ] && READ_VALUE="true" || READ_VALUE="false" ;;
  y | Y | yes | Yes | YES | true | True | TRUE) READ_VALUE="true" ;;
  *) READ_VALUE="false" ;;
  esac
}
|
||||
|
||||
# Auxiliary function to check if a boolean value is set to yes/true or not
is_selected() {
  # Succeeds (returns 0) only for the recognised affirmative spellings.
  _value="${1}"
  case "$_value" in
  y | Y | yes | Yes | YES | true | True | TRUE) ;;
  *) return 1 ;;
  esac
  return 0
}
|
||||
|
||||
# Auxiliary function to check if an application is installed
tools_app_installed() {
  # Succeeds when the command named in $1 is available to the shell.
  if type "$1" >/dev/null 2>&1; then
    return 0
  fi
  return 1
}

# Function to check if all the tools are installed
tools_check_apps_installed() {
  # List every requested app that is missing and abort (exit 1) if any.
  _not_found=""
  for _candidate in "$@"; do
    if ! tools_app_installed "$_candidate"; then
      _not_found="$_not_found $_candidate"
    fi
  done
  [ "$_not_found" ] || return 0
  echo "The following apps could not be found:"
  for _candidate in $_not_found; do
    echo "- $_candidate"
  done
  exit 1
}
|
||||
|
||||
# Auxiliary function to check if we want to install an app
tools_install_app() {
  # Ask the user (or decide from defaults when NONINTERACTIVE=true) whether
  # $1 should be (re)installed; returns 0 when installation should proceed.
  _app="$1"
  if tools_app_installed "$_app"; then
    echo "$_app found ($(type "$_app"))."
    MSG="Re-install in /usr/local/bin?"
    OPT="false"  # default: keep the existing binary
  else
    echo "$_app could not be found."
    MSG="Install it in /usr/local/bin?"
    OPT="true"  # default: install the missing binary
  fi
  # Export NONINTERACTIVE as 'true' to use default values
  if [ "$NONINTERACTIVE" = "true" ]; then
    READ_VALUE="$OPT"
  else
    read_bool "$MSG" "$OPT"
  fi
  is_selected "${READ_VALUE}" && return 0 || return 1
}
|
||||
|
||||
tools_check_cilium() {
  # Check / install the cilium CLI and its shell completions.
  if tools_install_app "cilium"; then
    tmp_dir="$(mktemp -d)"
    os="$(uname -s | tr '[:upper:]' '[:lower:]')"
    # FIX: map arm64 hosts too; previously a non-x86_64 machine left $arch
    # empty and produced a broken download URL.
    case "$(uname -m)" in
    x86_64) arch="amd64" ;;
    aarch64 | arm64) arch="arm64" ;;
    *) echo "Unsupported architecture '$(uname -m)'"; exit 1 ;;
    esac
    url="https://github.com/cilium/cilium-cli/releases/download"
    # FIX: use the detected $os (release assets are cilium-<os>-<arch>);
    # the old URL hardcoded 'linux' and left $os unused.
    url="$url/${CILIUM_CLI_VERSION}/cilium-$os-$arch.tar.gz"
    curl -fsSL -o "$tmp_dir/cilium.tar.gz" "$url"
    tar xzf "$tmp_dir/cilium.tar.gz" -C "$tmp_dir" "cilium"
    sudo install "$tmp_dir/cilium" /usr/local/bin
    rm -rf "$tmp_dir"
    cilium version
    if [ -d "$BASH_COMPLETION" ]; then
      sudo sh -c "cilium completion bash > $BASH_COMPLETION/cilium"
    fi
    if [ -d "$ZSH_COMPLETIONS" ]; then
      sudo sh -c "cilium completion zsh > $ZSH_COMPLETIONS/_cilium"
    fi
  fi
}
|
||||
|
||||
tools_check_docker() {
  # Check / install docker using the official get.docker.com script and add
  # the current user to the docker group.
  if tools_install_app "docker"; then
    tmp_dir="$(mktemp -d)"
    curl -fsSL -o "$tmp_dir/install-docker.sh" "https://get.docker.com"
    sh "$tmp_dir/install-docker.sh"
    rm -rf "$tmp_dir"
    # Allow the invoking user to talk to the docker daemon without sudo.
    sudo usermod -aG docker "$(id -un)"
    docker --version
  fi
}
|
||||
|
||||
tools_check_helm() {
  # Check / install helm and its shell completions.
  if tools_install_app "helm"; then
    tmp_dir="$(mktemp -d)"
    # NOTE(review): the variable declared (commented) at the top of the file
    # is GET_HELM_URL, but this code read GET_HELM — confirm which name is
    # intended; honour both until then.
    GET_HELM="${GET_HELM:-${GET_HELM_URL:-}}"
    if [ "$GET_HELM" ]; then
      # Use the upstream installer script (latest helm).
      curl -fsSL -o "$tmp_dir/get_helm.sh" "$GET_HELM"
      bash "$tmp_dir/get_helm.sh"
    else
      os="$(uname -s | tr '[:upper:]' '[:lower:]')"
      # FIX: map arm64 hosts too; previously a non-x86_64 machine left $arch
      # empty and produced a broken download URL.
      case "$(uname -m)" in
      x86_64) arch="amd64" ;;
      aarch64 | arm64) arch="arm64" ;;
      *) echo "Unsupported architecture '$(uname -m)'"; exit 1 ;;
      esac
      url="https://get.helm.sh/helm-v$HELM_VERSION-$os-$arch.tar.gz"
      curl -fsSL -o "$tmp_dir/helm.tar.gz" "$url"
      tar xzf "$tmp_dir/helm.tar.gz" -C "$tmp_dir" "$os-$arch/helm"
      sudo install "$tmp_dir/$os-$arch/helm" /usr/local/bin
    fi
    rm -rf "$tmp_dir"
    helm version
    if [ -d "$BASH_COMPLETION" ]; then
      sudo sh -c "helm completion bash > $BASH_COMPLETION/helm"
    fi
    if [ -d "$ZSH_COMPLETIONS" ]; then
      sudo sh -c "helm completion zsh > $ZSH_COMPLETIONS/_helm"
    fi
  fi
}
|
||||
|
||||
tools_check_hubble() {
  # Check / install the hubble CLI and its shell completions.
  if tools_install_app "hubble"; then
    tmp_dir="$(mktemp -d)"
    os="$(uname -s | tr '[:upper:]' '[:lower:]')"
    # FIX: map arm64 hosts too; previously a non-x86_64 machine left $arch
    # empty and produced a broken download URL.
    case "$(uname -m)" in
    x86_64) arch="amd64" ;;
    aarch64 | arm64) arch="arm64" ;;
    *) echo "Unsupported architecture '$(uname -m)'"; exit 1 ;;
    esac
    url="https://github.com/cilium/hubble/releases/download"
    # FIX: use the detected $os (release assets are hubble-<os>-<arch>);
    # the old URL hardcoded 'linux' and left $os unused.
    url="$url/${HUBBLE_VERSION}/hubble-$os-$arch.tar.gz"
    curl -fsSL -o "$tmp_dir/hubble.tar.gz" "$url"
    tar xzf "$tmp_dir/hubble.tar.gz" -C "$tmp_dir" "hubble"
    sudo install "$tmp_dir/hubble" /usr/local/bin
    rm -rf "$tmp_dir"
    hubble version
    if [ -d "$BASH_COMPLETION" ]; then
      sudo sh -c "hubble completion bash > $BASH_COMPLETION/hubble"
    fi
    if [ -d "$ZSH_COMPLETIONS" ]; then
      sudo sh -c "hubble completion zsh > $ZSH_COMPLETIONS/_hubble"
    fi
  fi
}
|
||||
|
||||
tools_check_k3d() {
  # Check / install k3d using the upstream installer (pinned via TAG) and
  # generate its shell completions.
  if tools_install_app "k3d"; then
    [ -d /usr/local/bin ] || sudo mkdir /usr/local/bin
    curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh |
      TAG="$K3D_VERSION" bash
    k3d version
    if [ -d "$BASH_COMPLETION" ]; then
      sudo sh -c "k3d completion bash > $BASH_COMPLETION/k3d"
    fi
    if [ -d "$ZSH_COMPLETIONS" ]; then
      sudo sh -c "k3d completion zsh > $ZSH_COMPLETIONS/_k3d"
    fi
  fi
}
|
||||
|
||||
tools_check_kind() {
  # Check / install kind and its shell completions.
  if tools_install_app "kind"; then
    os="$(uname | tr '[:upper:]' '[:lower:]')"
    # FIX: map arm64 hosts too; previously a non-x86_64 machine left $arch
    # empty and produced a broken download URL.
    case "$(uname -m)" in
    x86_64) arch="amd64" ;;
    aarch64 | arm64) arch="arm64" ;;
    *) echo "Unsupported architecture '$(uname -m)'"; exit 1 ;;
    esac
    url="https://github.com/kubernetes-sigs/kind/releases/download"
    url="$url/$KIND_VERSION/kind-$os-$arch"
    [ -d /usr/local/bin ] || sudo mkdir /usr/local/bin
    tmp_dir="$(mktemp -d)"
    curl -fsSL -o "$tmp_dir/kind" "$url"
    sudo install "$tmp_dir/kind" /usr/local/bin/
    rm -rf "$tmp_dir"
    kind version
    if [ -d "$BASH_COMPLETION" ]; then
      sudo sh -c "kind completion bash > $BASH_COMPLETION/kind"
    fi
    if [ -d "$ZSH_COMPLETIONS" ]; then
      sudo sh -c "kind completion zsh > $ZSH_COMPLETIONS/_kind"
    fi
  fi
}
|
||||
|
||||
|
||||
tools_check_kubectl() {
  # Check / install kubectl and its shell completions.
  if tools_install_app "kubectl"; then
    os="$(uname | tr '[:upper:]' '[:lower:]')"
    # FIX: map arm64 hosts too; previously a non-x86_64 machine left $arch
    # empty and produced a broken download URL.
    case "$(uname -m)" in
    x86_64) arch="amd64" ;;
    aarch64 | arm64) arch="arm64" ;;
    *) echo "Unsupported architecture '$(uname -m)'"; exit 1 ;;
    esac
    url="https://dl.k8s.io/release/v$KUBECTL_VERSION/bin/$os/$arch/kubectl"
    [ -d /usr/local/bin ] || sudo mkdir /usr/local/bin
    tmp_dir="$(mktemp -d)"
    curl -fsSL -o "$tmp_dir/kubectl" "$url"
    sudo install "$tmp_dir/kubectl" /usr/local/bin/
    rm -rf "$tmp_dir"
    kubectl version --client --output=yaml
    if [ -d "$BASH_COMPLETION" ]; then
      sudo sh -c "kubectl completion bash > $BASH_COMPLETION/kubectl"
    fi
    if [ -d "$ZSH_COMPLETIONS" ]; then
      sudo sh -c "kubectl completion zsh > $ZSH_COMPLETIONS/_kubectl"
    fi
  fi
}
|
||||
|
||||
tools_check_tmpl() {
  # Check / install the tmpl binary (template renderer used by the scripts).
  if tools_install_app "tmpl"; then
    tmp_dir="$(mktemp -d)"
    # FIX: removed the unused $os assignment (the release asset name only
    # varies by architecture) and guard against unmapped architectures,
    # which previously produced a broken download URL.
    case "$(uname -m)" in
    x86_64) arch="amd64" ;;
    aarch64 | arm64) arch="arm64" ;;  # TODO confirm upstream ships arm64 builds
    *) echo "Unsupported architecture '$(uname -m)'"; exit 1 ;;
    esac
    url="https://github.com/krakozaure/tmpl/releases/download"
    url="$url/${TMPL_VERSION}/tmpl-linux_$arch"
    curl -fsSL -o "$tmp_dir/tmpl" "$url"
    sudo install "$tmp_dir/tmpl" /usr/local/bin
    rm -rf "$tmp_dir"
  fi
}
|
||||
tools_check() {
  # Dispatch each requested application name to its tools_check_* installer.
  for _app in "$@"; do
    case "$_app" in
    cilium) tools_check_cilium;;
    docker) tools_check_docker ;;
    helm) tools_check_helm ;;
    k3d) tools_check_k3d ;;
    kind) tools_check_kind ;;
    kubectl) tools_check_kubectl ;;
    hubble) tools_check_hubble;;
    tmpl) tools_check_tmpl ;;
    *) echo "Unknown application '$_app'" ;;
    esac
  done
}
|
||||
|
||||
tools_apps_list() {
  # Print the space separated list of applications managed by this script.
  printf '%s\n' "cilium docker helm k3d kind kubectl hubble tmpl"
}
|
||||
|
||||
# Usage function
# Prints the help text (including the managed app list) and exits with $1.
usage() {
  cat <<EOF
Command to check and install tools used by our scripts.

Usage:

  $(basename "$0") apps|SPACE_SEPARATED_LIST_OF_TOOLS

Where the SPACE_SEPARATED_LIST_OF_TOOLS can include the following apps:

$(for tool in $(tools_apps_list); do echo "- $tool"; done)

EOF
  exit "$1"
}

# ----
# MAIN
# ----

# No args -> help; 'apps' -> check every managed tool; otherwise check the
# tools named on the command line.
# shellcheck disable=SC2046
case "$1" in
"") usage 0 ;;
apps) tools_check $(tools_apps_list) ;;
*) tools_check "$@" ;;
esac
|
||||
|
||||
# ----
|
||||
# vim: ts=2:sw=2:et:ai:sts=2
|
78
test/cilium-connectivity.sh
Executable file
78
test/cilium-connectivity.sh
Executable file
|
@ -0,0 +1,78 @@
|
|||
#!/bin/sh
# ----
# File: cilium-connectivity.sh
# Description: Script to test cilium connectivity in our deployments
# Author: Sergio Talens-Oliag <sto@mixinet.net>
# Copyright: (c) 2023 Sergio Talens-Oliag <sto@mixinet.net>
# ----

set -e

# ---------
# VARIABLES
# ---------

# Set HUBBLE_PF=true to keep a 'cilium hubble port-forward' running while
# the connectivity test executes.
HUBBLE_PF="${HUBBLE_PF:-false}"

# ---------
# FUNCTIONS
# ---------

# Print usage and exit with the status passed in $1.
usage() {
  cat <<EOF
Usage: $0 CTOOL CLUSTER

Where:

- CTOOL is 'k3d' or 'kind'
- CLUSTER is '1', '2' or '12' (multicluster test)

EOF
  exit "$1"
}
|
||||
|
||||
start_pf() {
  # Launch a background hubble port-forward when HUBBLE_PF is "true".
  # PF_PID holds its PID afterwards (empty when nothing was started).
  PF_PID=""
  [ "$HUBBLE_PF" = "true" ] || return 0
  cilium hubble port-forward --context "$CTX" &
  PF_PID="$!"
  echo "Started hubble port-forward for $CTX with PID '$PF_PID'"
}
|
||||
|
||||
stop_pf() {
  # Terminate the background port-forward started by start_pf, if any.
  [ -n "$PF_PID" ] || return 0
  echo "Killing hubble port-forward (PID '$PF_PID')"
  kill "$PF_PID"
}
|
||||
|
||||
# ====
# MAIN
# ====

CTOOL="$1"
CNUM="$2"

case "$CTOOL" in
k3d|kind) ;;
*) usage 1;;
esac
case "$CNUM" in
1|2)
  # Single-cluster connectivity test.
  CNAME="cilium$CNUM"
  CTX="$CTOOL-$CNAME"
  start_pf
  cilium connectivity test --context "$CTX"
  ;;
12)
  # Multicluster test between cilium1 and cilium2.
  CTX="$CTOOL-cilium1"
  CTX2="$CTOOL-cilium2"
  start_pf
  cilium connectivity test --context "$CTX" --multi-cluster "$CTX2"
  ;;
*) usage 1 ;;
esac

stop_pf
|
115
test/http-sw.sh
Executable file
115
test/http-sw.sh
Executable file
|
@ -0,0 +1,115 @@
|
|||
#!/bin/sh

# REF: https://docs.cilium.io/en/stable/gettingstarted/demo/#starwars-demo

# Compute WORK_DIR
# The script lives next to the http-sw/ manifests directory.
SCRIPT="$(readlink -f "$0")"
SCRIPT_DIR="$(dirname "$SCRIPT")"
WORK_DIR_RELPATH="."
WORK_DIR="$(readlink -f "$SCRIPT_DIR/$WORK_DIR_RELPATH")"

# VARIABLES
# Namespace and manifests used by the Star Wars HTTP demo.
NAMESPACE="http-sw"
YAML_DIR="$WORK_DIR/http-sw"
APP_YAML="$YAML_DIR/http-sw-app.yaml"
SW_L3_L4_POLICY_YAML="$YAML_DIR/sw_l3_l4_policy.yaml"
SW_L3_L4_L7_POLICY_YAML="$YAML_DIR/sw_l3_l4_l7_policy.yaml"
|
||||
|
||||
access_test() {
  # Exercise the demo API from both client pods: request-landing (allowed by
  # the policies for empire ships) and the exhaust-port endpoint (blocked by
  # the L7 policy). Failures are only reported, not fatal.
  for pod in xwing tiefighter; do
    ret="0"
    echo "Checking deathstar access from '$pod'"
    kubectl -n "$NAMESPACE" exec "$pod" -- curl --connect-timeout 5 \
      -s -XPOST "deathstar.$NAMESPACE.svc.cluster.local/v1/request-landing" ||
      ret="$?"
    if [ "$ret" -ne "0" ]; then
      echo "Connection failed!"
    fi
  done
  # shellcheck disable=SC2043
  for pod in tiefighter; do
    ret="0"
    # FIX: message typo 'exaust-port' -> 'exhaust-port' (the URL path was
    # already spelled correctly).
    echo "Checking deathstar exhaust-port access from '$pod'"
    kubectl -n "$NAMESPACE" exec "$pod" -- curl --connect-timeout 5 \
      -s -XPUT "deathstar.$NAMESPACE.svc.cluster.local/v1/exhaust-port" ||
      ret="$?"
    if [ "$ret" -ne "0" ]; then
      echo "Connection failed!"
    fi
  done
}
|
||||
|
||||
create_deployment() {
  # Create the demo namespace (if needed) and deploy the application.
  kubectl create ns "$NAMESPACE" || true
  kubectl -n "$NAMESPACE" apply -f "$APP_YAML"
}

delete_deployment() {
  # Remove the whole demo by deleting its namespace.
  kubectl delete ns "$NAMESPACE"
}

list_sw_endpoints() {
  # For each cilium agent pod print the endpoint-list header plus the rows
  # whose labels match the demo pods (org=alliance or org=empire).
  for pod in $(kubectl -n kube-system get pods -l k8s-app=cilium -o name); do
    OUTPUT="$(
      kubectl -n kube-system exec "$pod" -c cilium-agent \
        -- cilium endpoint list
    )"
    echo "$OUTPUT" | head -1
    echo "$OUTPUT" | grep -B6 "org=\(alliance\|empire\)" | grep -v "^--"
  done
}

status() {
  # Show the demo's k8s objects, including CiliumNetworkPolicies.
  kubectl -n "$NAMESPACE" get all,CiliumNetworkPolicy
}

usage() {
  # Print the one-line usage summary and exit with the status in $1.
  echo "Usage: $0 create|delete|desc|endpoints|policy-(l34|l7|none)|status|test"
  exit "$1"
}
|
||||
|
||||
# ====
# MAIN
# ====

# Dispatch on the single sub-command argument.
case "$1" in
create) create_deployment;;
delete) delete_deployment;;
desc|describe)
  # Describe the currently installed policy (both demo policies are named
  # 'rule1'), if any.
  if kubectl -n "$NAMESPACE" get cnp/rule1 -o name 2>/dev/null 1>&2; then
    echo "Describe current policy"
    kubectl -n "$NAMESPACE" describe CiliumNetworkPolicy/rule1
  else
    echo "Policy not installed"
  fi
  ;;
eps|endpoints) list_sw_endpoints;;
policy-l34)
  echo "Adding SW L3-L4 policy"
  echo ""
  cat "$SW_L3_L4_POLICY_YAML"
  echo ""
  kubectl -n "$NAMESPACE" apply -f "$SW_L3_L4_POLICY_YAML"
  ;;
policy-l7)
  echo "Adding SW L3-L4-L7 policy:"
  echo ""
  cat "$SW_L3_L4_L7_POLICY_YAML"
  echo ""
  kubectl -n "$NAMESPACE" apply -f "$SW_L3_L4_L7_POLICY_YAML"
  ;;
policy-none)
  echo "Removing Cilium Network Policy 'rule1'"
  kubectl -n "$NAMESPACE" delete CiliumNetworkPolicy/rule1
  ;;
status) status;;
test)
  echo "Running access test"
  access_test
  ;;
"") usage "0" ;;
*) usage "1" ;;
esac
|
||||
|
||||
# ----
|
||||
# vim: ts=2:sw=2:et:ai:sts=2
|
63
test/http-sw/http-sw-app.yaml
Normal file
63
test/http-sw/http-sw-app.yaml
Normal file
|
@ -0,0 +1,63 @@
|
|||
---
# ClusterIP service fronting the deathstar replicas on port 80.
apiVersion: v1
kind: Service
metadata:
  name: deathstar
  labels:
    app.kubernetes.io/name: deathstar
spec:
  type: ClusterIP
  ports:
  - port: 80
  selector:
    org: empire
    class: deathstar
---
# Two-replica deathstar deployment: the HTTP API the network policies protect.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deathstar
  labels:
    app.kubernetes.io/name: deathstar
spec:
  replicas: 2
  selector:
    matchLabels:
      org: empire
      class: deathstar
  template:
    metadata:
      labels:
        org: empire
        class: deathstar
        app.kubernetes.io/name: deathstar
    spec:
      containers:
      - name: deathstar
        image: docker.io/cilium/starwars
---
# Client pod labelled org=empire (matched by the policies' fromEndpoints).
apiVersion: v1
kind: Pod
metadata:
  name: tiefighter
  labels:
    org: empire
    class: tiefighter
    app.kubernetes.io/name: tiefighter
spec:
  containers:
  - name: spaceship
    image: docker.io/tgraf/netperf
---
# Client pod labelled org=alliance (not matched by the policies).
apiVersion: v1
kind: Pod
metadata:
  name: xwing
  labels:
    app.kubernetes.io/name: xwing
    org: alliance
    class: xwing
spec:
  containers:
  - name: spaceship
    image: docker.io/tgraf/netperf
|
22
test/http-sw/sw_l3_l4_l7_policy.yaml
Normal file
22
test/http-sw/sw_l3_l4_l7_policy.yaml
Normal file
|
@ -0,0 +1,22 @@
|
|||
apiVersion: "cilium.io/v2"
|
||||
kind: CiliumNetworkPolicy
|
||||
metadata:
|
||||
name: "rule1"
|
||||
spec:
|
||||
description: "L7 policy to restrict access to specific HTTP call"
|
||||
endpointSelector:
|
||||
matchLabels:
|
||||
org: empire
|
||||
class: deathstar
|
||||
ingress:
|
||||
- fromEndpoints:
|
||||
- matchLabels:
|
||||
org: empire
|
||||
toPorts:
|
||||
- ports:
|
||||
- port: "80"
|
||||
protocol: TCP
|
||||
rules:
|
||||
http:
|
||||
- method: "POST"
|
||||
path: "/v1/request-landing"
|
18
test/http-sw/sw_l3_l4_policy.yaml
Normal file
18
test/http-sw/sw_l3_l4_policy.yaml
Normal file
|
@ -0,0 +1,18 @@
|
|||
apiVersion: "cilium.io/v2"
|
||||
kind: CiliumNetworkPolicy
|
||||
metadata:
|
||||
name: "rule1"
|
||||
spec:
|
||||
description: "L3-L4 policy to restrict deathstar access to empire ships only"
|
||||
endpointSelector:
|
||||
matchLabels:
|
||||
org: empire
|
||||
class: deathstar
|
||||
ingress:
|
||||
- fromEndpoints:
|
||||
- matchLabels:
|
||||
org: empire
|
||||
toPorts:
|
||||
- ports:
|
||||
- port: "80"
|
||||
protocol: TCP
|
114
test/ingress-basic.sh
Executable file
114
test/ingress-basic.sh
Executable file
|
@ -0,0 +1,114 @@
|
|||
#!/bin/sh
|
||||
# ----
|
||||
# File: ingress-basic.sh
|
||||
# Description: Script to test the ingress services on our cilium deployments
|
||||
# Author: Sergio Talens-Oliag <sto@mixinet.net>
|
||||
# Copyright: (c) 2023 Sergio Talens-Oliag <sto@mixinet.net>
|
||||
# ----
|
||||
# REF: https://docs.cilium.io/en/latest/network/servicemesh/http/
|
||||
# ----
|
||||
|
||||
set -e

# Compute WORK_DIR from the script location; all auxiliary files are
# resolved relative to it.
SCRIPT="$(readlink -f "$0")"
SCRIPT_DIR="$(dirname "$SCRIPT")"
WORK_DIR_RELPATH="."
WORK_DIR="$(readlink -f "$SCRIPT_DIR/$WORK_DIR_RELPATH")"

# VARIABLES
NAMESPACE="ingress-basic"                # namespace holding the test objects
YAML_DIR="$WORK_DIR/ingress-basic"       # directory with the manifests
BOOKINFO_YAML="$YAML_DIR/bookinfo.yaml"  # istio bookinfo demo application
|
||||
|
||||
# Create the test namespace (ignoring "already exists" errors) and apply the
# bookinfo application manifest followed by the ingress manifest selected in
# MAIN ($INGRESS_BASIC_YAML).
create_deployment() {
  kubectl create ns "$NAMESPACE" || true
  for _yaml in "$BOOKINFO_YAML" "$INGRESS_BASIC_YAML"; do
    kubectl apply -n "$NAMESPACE" -f "$_yaml"
  done
}

# Remove everything at once by deleting the whole namespace.
delete_deployment() {
  kubectl delete ns "$NAMESPACE"
}
|
||||
|
||||
# Block until the two bookinfo deployments exercised by test_ingress are
# Available (90s timeout per deployment; kubectl fails otherwise and set -e
# aborts the script).
wait_for_deployments() {
  for _dep in productpage-v1 details-v1; do
    printf '%s\n' "Waiting for '$_dep' deployment to be ready"
    kubectl wait -n "$NAMESPACE" deployment "$_dep" \
      --for condition=Available=True --timeout=90s
  done
}
|
||||
|
||||
# Poll once per second until the first ingress object in the namespace has a
# load-balancer address assigned, printing a dot per attempt.
wait_for_ingress(){
  printf "Waiting for the ingress to be ready "
  _lb=""
  until [ -n "$_lb" ]; do
    _lb="$(
      kubectl get -n "$NAMESPACE" ingress \
        -o jsonpath="{.items[0].status.loadBalancer.ingress}"
    )"
    if [ -z "$_lb" ]; then
      printf "."
      sleep 1
    fi
  done
  echo ". OK"
}
|
||||
|
||||
# Show the test objects (pods, services, ingresses) and the ingress
# controller service selected in MAIN ($INGRESS_NAMESPACE/$INGRESS_CONTROLLER).
print_objects() {
  for _kind in pods svc ingress; do
    kubectl get -n "$NAMESPACE" "$_kind"
  done
  kubectl get -n "$INGRESS_NAMESPACE" "$INGRESS_CONTROLLER"
}
|
||||
|
||||
# Hit the two ingress routes through the controller's external IP to verify
# that traffic reaches the bookinfo services.
test_ingress() {
  # External IP assigned to the ingress controller service by the LB.
  HTTP_INGRESS="$(
    kubectl get -n "$INGRESS_NAMESPACE" "$INGRESS_CONTROLLER" \
      -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
  )"
  URL="http://$HTTP_INGRESS/details/1"
  echo "Testing 'details-v1' service connecting to '$URL'"
  # Pass an explicit '.' filter: jq versions before 1.7 exit with an error
  # ("A filter must be specified") when invoked without a program argument.
  curl -s --fail "$URL" | jq .
  URL="http://$HTTP_INGRESS/"
  echo "Testing 'productpage-v1' service connecting to '$URL' (10 first lines)"
  curl -s --fail "$URL" | head -n 10
}
|
||||
|
||||
# Print the command synopsis and terminate with the given exit code.
# $1 - exit status (0 when no arguments were given, 1 on a bad argument).
usage() {
  printf '%s\n' "Usage: $0 cilium|nginx create|delete|status|test|wait"
  exit "$1"
}
|
||||
|
||||
# ----
|
||||
# MAIN
|
||||
# ----
|
||||
|
||||
# Select the ingress controller from the first argument.  The three variables
# set here are consumed by create_deployment, print_objects and test_ingress.
case "$1" in
cilium)
  # We assume that the cilium ingress is shared
  INGRESS_NAMESPACE="kube-system"
  INGRESS_CONTROLLER="service/cilium-ingress"
  INGRESS_BASIC_YAML="$YAML_DIR/ingress-basic-cilium.yaml"
  ;;
nginx)
  INGRESS_NAMESPACE="ingress-nginx"
  INGRESS_CONTROLLER="service/ingress-nginx-controller"
  INGRESS_BASIC_YAML="$YAML_DIR/ingress-basic-nginx.yaml"
  ;;
"") usage 0;;  # no arguments: print usage and exit successfully
*) usage 1;;   # unknown controller: print usage and exit with an error
esac

# Dispatch on the action given as the second argument.
case "$2" in
create) create_deployment;;
delete) delete_deployment;;
status) print_objects;;
test) test_ingress;;
wait) wait_for_deployments && wait_for_ingress;;
*) usage 1;;
esac
|
||||
|
||||
# ----
|
||||
# vim: ts=2:sw=2:et:ai:sts=2
|
343
test/ingress-basic/bookinfo.yaml
Normal file
343
test/ingress-basic/bookinfo.yaml
Normal file
|
@ -0,0 +1,343 @@
|
|||
# Copyright Istio Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
##################################################################################################
|
||||
# This file defines the services, service accounts, and deployments for the Bookinfo sample.
|
||||
#
|
||||
# To apply all 4 Bookinfo services, their corresponding service accounts, and deployments:
|
||||
#
|
||||
# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
|
||||
#
|
||||
# Alternatively, you can deploy any resource separately:
|
||||
#
|
||||
# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l service=reviews # reviews Service
|
||||
# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l account=reviews # reviews ServiceAccount
|
||||
# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l app=reviews,version=v3 # reviews-v3 Deployment
|
||||
##################################################################################################
|
||||
|
||||
##################################################################################################
|
||||
# Details service
|
||||
##################################################################################################
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: details
|
||||
labels:
|
||||
app: details
|
||||
service: details
|
||||
spec:
|
||||
ports:
|
||||
- port: 9080
|
||||
name: http
|
||||
selector:
|
||||
app: details
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: bookinfo-details
|
||||
labels:
|
||||
account: details
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: details-v1
|
||||
labels:
|
||||
app: details
|
||||
version: v1
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: details
|
||||
version: v1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: details
|
||||
version: v1
|
||||
spec:
|
||||
serviceAccountName: bookinfo-details
|
||||
containers:
|
||||
- name: details
|
||||
image: docker.io/istio/examples-bookinfo-details-v1:1.16.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9080
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
---
|
||||
##################################################################################################
|
||||
# Ratings service
|
||||
##################################################################################################
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ratings
|
||||
labels:
|
||||
app: ratings
|
||||
service: ratings
|
||||
spec:
|
||||
ports:
|
||||
- port: 9080
|
||||
name: http
|
||||
selector:
|
||||
app: ratings
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: bookinfo-ratings
|
||||
labels:
|
||||
account: ratings
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ratings-v1
|
||||
labels:
|
||||
app: ratings
|
||||
version: v1
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: ratings
|
||||
version: v1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: ratings
|
||||
version: v1
|
||||
spec:
|
||||
serviceAccountName: bookinfo-ratings
|
||||
containers:
|
||||
- name: ratings
|
||||
image: docker.io/istio/examples-bookinfo-ratings-v1:1.16.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9080
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
---
|
||||
##################################################################################################
|
||||
# Reviews service
|
||||
##################################################################################################
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: reviews
|
||||
labels:
|
||||
app: reviews
|
||||
service: reviews
|
||||
spec:
|
||||
ports:
|
||||
- port: 9080
|
||||
name: http
|
||||
selector:
|
||||
app: reviews
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: bookinfo-reviews
|
||||
labels:
|
||||
account: reviews
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: reviews-v1
|
||||
labels:
|
||||
app: reviews
|
||||
version: v1
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: reviews
|
||||
version: v1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: reviews
|
||||
version: v1
|
||||
spec:
|
||||
serviceAccountName: bookinfo-reviews
|
||||
containers:
|
||||
- name: reviews
|
||||
image: docker.io/istio/examples-bookinfo-reviews-v1:1.16.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: LOG_DIR
|
||||
value: "/tmp/logs"
|
||||
ports:
|
||||
- containerPort: 9080
|
||||
volumeMounts:
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: wlp-output
|
||||
mountPath: /opt/ibm/wlp/output
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
volumes:
|
||||
- name: wlp-output
|
||||
emptyDir: {}
|
||||
- name: tmp
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: reviews-v2
|
||||
labels:
|
||||
app: reviews
|
||||
version: v2
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: reviews
|
||||
version: v2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: reviews
|
||||
version: v2
|
||||
spec:
|
||||
serviceAccountName: bookinfo-reviews
|
||||
containers:
|
||||
- name: reviews
|
||||
image: docker.io/istio/examples-bookinfo-reviews-v2:1.16.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: LOG_DIR
|
||||
value: "/tmp/logs"
|
||||
ports:
|
||||
- containerPort: 9080
|
||||
volumeMounts:
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: wlp-output
|
||||
mountPath: /opt/ibm/wlp/output
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
volumes:
|
||||
- name: wlp-output
|
||||
emptyDir: {}
|
||||
- name: tmp
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: reviews-v3
|
||||
labels:
|
||||
app: reviews
|
||||
version: v3
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: reviews
|
||||
version: v3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: reviews
|
||||
version: v3
|
||||
spec:
|
||||
serviceAccountName: bookinfo-reviews
|
||||
containers:
|
||||
- name: reviews
|
||||
image: docker.io/istio/examples-bookinfo-reviews-v3:1.16.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: LOG_DIR
|
||||
value: "/tmp/logs"
|
||||
ports:
|
||||
- containerPort: 9080
|
||||
volumeMounts:
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
- name: wlp-output
|
||||
mountPath: /opt/ibm/wlp/output
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
volumes:
|
||||
- name: wlp-output
|
||||
emptyDir: {}
|
||||
- name: tmp
|
||||
emptyDir: {}
|
||||
---
|
||||
##################################################################################################
|
||||
# Productpage services
|
||||
##################################################################################################
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: productpage
|
||||
labels:
|
||||
app: productpage
|
||||
service: productpage
|
||||
spec:
|
||||
ports:
|
||||
- port: 9080
|
||||
name: http
|
||||
selector:
|
||||
app: productpage
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: bookinfo-productpage
|
||||
labels:
|
||||
account: productpage
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: productpage-v1
|
||||
labels:
|
||||
app: productpage
|
||||
version: v1
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: productpage
|
||||
version: v1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: productpage
|
||||
version: v1
|
||||
spec:
|
||||
serviceAccountName: bookinfo-productpage
|
||||
containers:
|
||||
- name: productpage
|
||||
image: docker.io/istio/examples-bookinfo-productpage-v1:1.16.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9080
|
||||
volumeMounts:
|
||||
- name: tmp
|
||||
mountPath: /tmp
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
volumes:
|
||||
- name: tmp
|
||||
emptyDir: {}
|
||||
---
|
25
test/ingress-basic/ingress-basic-cilium.yaml
Normal file
25
test/ingress-basic/ingress-basic-cilium.yaml
Normal file
|
@ -0,0 +1,25 @@
|
|||
# Basic ingress for the istio bookinfo demo application, available at:
|
||||
# https://raw.githubusercontent.com/istio/istio/release-1.11/samples/bookinfo/platform/kube/bookinfo.yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: ingress-basic-cilium
|
||||
spec:
|
||||
ingressClassName: cilium
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- backend:
|
||||
service:
|
||||
name: details
|
||||
port:
|
||||
number: 9080
|
||||
path: /details
|
||||
pathType: Prefix
|
||||
- backend:
|
||||
service:
|
||||
name: productpage
|
||||
port:
|
||||
number: 9080
|
||||
path: /
|
||||
pathType: Prefix
|
25
test/ingress-basic/ingress-basic-nginx.yaml
Normal file
25
test/ingress-basic/ingress-basic-nginx.yaml
Normal file
|
@ -0,0 +1,25 @@
|
|||
# Basic ingress for the istio bookinfo demo application, available at:
|
||||
# https://raw.githubusercontent.com/istio/istio/release-1.11/samples/bookinfo/platform/kube/bookinfo.yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: ingress-basic-nginx
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- backend:
|
||||
service:
|
||||
name: details
|
||||
port:
|
||||
number: 9080
|
||||
path: /details
|
||||
pathType: Prefix
|
||||
- backend:
|
||||
service:
|
||||
name: productpage
|
||||
port:
|
||||
number: 9080
|
||||
path: /
|
||||
pathType: Prefix
|
254
test/mesh-test.sh
Executable file
254
test/mesh-test.sh
Executable file
|
@ -0,0 +1,254 @@
|
|||
#!/bin/sh
|
||||
# ----
|
||||
# File: mesh-test.sh
|
||||
# Description: Script to test the cluster mesh on our cilium deployments
|
||||
# Author: Sergio Talens-Oliag <sto@mixinet.net>
|
||||
# Copyright: (c) 2023 Sergio Talens-Oliag <sto@mixinet.net>
|
||||
# ----
|
||||
# REF: https://docs.cilium.io/en/stable/network/clustermesh/services/
|
||||
# ----
|
||||
|
||||
set -e

# Compute WORK_DIR from the script location; manifests are resolved
# relative to it.
SCRIPT="$(readlink -f "$0")"
SCRIPT_DIR="$(dirname "$SCRIPT")"
WORK_DIR_RELPATH="."
WORK_DIR="$(readlink -f "$SCRIPT_DIR/$WORK_DIR_RELPATH")"

# VARIABLES
NAMESPACE="mesh-test"                  # namespace used on both clusters
SERVICE="svc/rebel-base"               # the global (mesh shared) service
DEPLOYMENT_RB="deployment/rebel-base"  # backend deployment
DEPLOYMENT_XW="deployment/x-wing"      # client deployment used by the tests
YAML_DIR="$WORK_DIR/mesh-test"
GSC1_YAML="$YAML_DIR/cluster1.yaml"    # manifests for cluster 1
GSC2_YAML="$YAML_DIR/cluster2.yaml"    # manifests for cluster 2
ACCESS_TEST_LOOPS="7"                  # requests per cluster in access_test
|
||||
|
||||
# Send $ACCESS_TEST_LOOPS HTTP requests to the 'rebel-base' service from the
# x-wing deployment of each cluster, printing every response, so the reply
# distribution across the mesh can be observed.
access_test() {
  for ctx in "$CTX1" "$CTX2"; do
    echo "Running $ACCESS_TEST_LOOPS tests from '$ctx'"
    _i=0
    while [ "$_i" -lt "$ACCESS_TEST_LOOPS" ]; do
      _i=$((_i + 1))
      kubectl --context "$ctx" -n "$NAMESPACE" exec -ti "$DEPLOYMENT_XW" \
        -- curl rebel-base
    done
  done
}
|
||||
|
||||
# Create the namespace and deploy the demo objects on both clusters.
create() {
  for cn in "1" "2"; do
    echo "Creating Global Service on Cluster $cn"
    create_namespace "$cn"
    deploy_objects "$cn"
  done
}

# Create the test namespace on cluster $1 (1 or 2); errors (namespace
# already exists) are ignored.
create_namespace() {
  case "$1" in
  1) ctx="$CTX1";;
  2) ctx="$CTX2";;
  *) echo "Unknown cluster number '$1'"; exit 1;;
  esac
  kubectl --context="$ctx" create ns "$NAMESPACE" || true
}

# Apply the manifests for cluster $1, rewriting the 'Cluster-N' placeholder
# in the manifest so the deployed ConfigMap message identifies the cluster
# tool prefix in use ($CTOOL is 'k3d' or 'kind', set in MAIN).
deploy_objects() {
  case "$1" in
  1) ctx="$CTX1"; yaml="$GSC1_YAML";;
  2) ctx="$CTX2"; yaml="$GSC2_YAML";;
  *) echo "Unknown cluster number '$1'"; exit 1;;
  esac
  sed -e "s/Cluster-/$CTOOL-cluster-/" "$yaml" |
    kubectl --context="$ctx" -n "$NAMESPACE" apply -f -
}
|
||||
|
||||
# Delete the demo objects and the namespace on both clusters; failures are
# tolerated so the command is idempotent.
delete() {
  for cn in "1" "2"; do
    echo "Deleting Global Service on Cluster $cn"
    delete_objects "$cn" || true
    delete_namespace "$cn"
  done
}

# Delete only the rebel-base deployment on cluster $1 (useful to show the
# global service still answering from the remaining cluster).
delete_deployment() {
  case "$1" in
  1) ctx="$CTX1";;
  2) ctx="$CTX2";;
  *) echo "Unknown cluster number '$1'"; exit 1;;
  esac
  echo "Deleting '$DEPLOYMENT_RB' on Cluster $1"
  kubectl --context="$ctx" -n "$NAMESPACE" delete "$DEPLOYMENT_RB" || true
}

# Delete the test namespace on cluster $1; ignore errors if it is missing.
delete_namespace() {
  case "$1" in
  1) ctx="$CTX1";;
  2) ctx="$CTX2";;
  *) echo "Unknown cluster number '$1'"; exit 1;;
  esac
  kubectl --context="$ctx" delete ns "$NAMESPACE" || true
}

# Delete the objects created by deploy_objects on cluster $1; the same sed
# substitution is applied so the manifest matches what was deployed.
delete_objects() {
  case "$1" in
  1) ctx="$CTX1"; yaml="$GSC1_YAML";;
  2) ctx="$CTX2"; yaml="$GSC2_YAML";;
  *) echo "Unknown cluster number '$1'"; exit 1;;
  esac
  sed -e "s/Cluster-/$CTOOL-cluster-/" "$yaml" |
    kubectl --context="$ctx" -n "$NAMESPACE" delete -f -
}
|
||||
|
||||
# Print the 'service.cilium.io/*' annotations of $SERVICE on both clusters,
# reformatted as a '-' prefixed list.
# NOTE(review): the leading-space count in the sed pattern must match the
# indentation of 'metadata.annotations' entries in 'kubectl get -o yaml'
# output (4 spaces) — confirm against a live cluster.
get_cilium_annotations() {
  for ctx in "$CTX1" "$CTX2"; do
    echo "Service '$SERVICE' cilium annotations on '$ctx'"
    kubectl --context "$ctx" -n "$NAMESPACE" get "$SERVICE" -o yaml |
      sed -ne 's/^    service.cilium.io/- service.cilium.io/p'
  done
}

# Show all objects in the test namespace on both clusters.
status() {
  for ctx in "$CTX1" "$CTX2"; do
    echo "Mesh test status on '$ctx'"
    echo ""
    kubectl --context "$ctx" -n "$NAMESPACE" get all
    echo ""
  done
}

# Block until both deployments are Available on both clusters (90s timeout
# each; kubectl fails otherwise and set -e aborts the script).
wait_for_deployments() {
  for ctx in "$CTX1" "$CTX2"; do
    for _deployment in "$DEPLOYMENT_RB" "$DEPLOYMENT_XW"; do
      echo "Waiting for '$_deployment' to be ready on '$ctx'"
      kubectl wait --context="$ctx" -n "$NAMESPACE" "$_deployment" \
        --for condition=Available=True --timeout=90s
    done
  done
}
|
||||
|
||||
# Remove the affinity annotation from $SERVICE on kubectl context $1; the
# trailing '-' on the annotation key tells 'kubectl annotate' to delete it.
# NOTE(review): defined but not wired into the MAIN dispatcher.
service_affinity_default(){
  kubectl --context="$1" -n "$NAMESPACE" annotate "$SERVICE" \
    service.cilium.io/affinity-
}


# Set the service.cilium.io/affinity annotation to "local" on context $1.
service_affinity_local(){
  kubectl --context="$1" -n "$NAMESPACE" annotate "$SERVICE" \
    service.cilium.io/affinity="local" --overwrite
}

# Set the service.cilium.io/affinity annotation to "none" on context $1.
service_affinity_none(){
  kubectl --context="$1" -n "$NAMESPACE" annotate "$SERVICE" \
    service.cilium.io/affinity="none" --overwrite
}

# Set the service.cilium.io/affinity annotation to "remote" on context $1.
service_affinity_remote(){
  kubectl --context="$1" -n "$NAMESPACE" annotate "$SERVICE" \
    service.cilium.io/affinity="remote" --overwrite
}
|
||||
|
||||
# Remove the service.cilium.io/shared annotation from $SERVICE on cluster
# $1 (1 or 2); the trailing '-' deletes the annotation.
service_shared_default(){
  case "$1" in
  1) ctx="$CTX1";;
  2) ctx="$CTX2";;
  *) echo "Unknown cluster number '$1'"; exit 1;;
  esac
  kubectl --context="$ctx" -n "$NAMESPACE" annotate "$SERVICE" \
    service.cilium.io/shared-
}

# Set the service.cilium.io/shared annotation to "false" on cluster $1.
service_shared_false(){
  case "$1" in
  1) ctx="$CTX1";;
  2) ctx="$CTX2";;
  *) echo "Unknown cluster number '$1'"; exit 1;;
  esac
  kubectl --context="$ctx" -n "$NAMESPACE" annotate "$SERVICE" \
    service.cilium.io/shared="false" --overwrite
}

# Set the service.cilium.io/shared annotation to "true" on cluster $1.
service_shared_true(){
  case "$1" in
  1) ctx="$CTX1";;
  2) ctx="$CTX2";;
  *) echo "Unknown cluster number '$1'"; exit 1;;
  esac
  kubectl --context="$ctx" -n "$NAMESPACE" annotate "$SERVICE" \
    service.cilium.io/shared="true" --overwrite
}
|
||||
|
||||
# Print the help text and terminate with exit code $1.  The here-document
# expands "$0" and "$ACCESS_TEST_LOOPS" at run time.
usage() {
  cat <<EOF
Usage: $0 CLUST_TYPE ACTION

Where CLUST_TYPE is 'k3d' or 'kind' and ACTION is one of:

- create: creates namespaces and deploy services on both clusters
- delete: deletes services and namespaces on both clusters
- delete-deployment [CLUST]: delete rebel-base deployment from CLUST (default 1)
- delete-objects [CLUST]: delete objects from the cluster CLUST (default 1)
- deploy-objects [CLUST]: deploy objects on the cluster CLUST (default 1)
- get-annotations: get service annotations of both clusters
- svc-affinity-local: sets local affinity for the service on both clusters
- svc-affinity-remote: sets remote affinity for the service on both clusters
- svc-affinity-none: removes affinity for the service on both clusters
- svc-shared-default [CLUST]: remove shared annotation from the CLUST cluster
- svc-shared-false [CLUST]: removes service sharing from the CLUST cluster
- svc-shared-true [CLUST]: enables service sharing on the CLUST cluster
- status: prints the deployment status on both clusters
- test: calls the services $ACCESS_TEST_LOOPS times from each cluster
- wait: waits until the deployments are ready on both clusters
EOF
  exit "$1"
}
|
||||
|
||||
# ====
|
||||
# MAIN
|
||||
# ====
|
||||
|
||||
# First argument selects the cluster tool; it determines the kubectl
# context names created by the cluster setup scripts.
CTOOL="$1"
case "$CTOOL" in
k3d|kind)
  CTX1="$CTOOL-cilium1"
  CTX2="$CTOOL-cilium2"
  ;;
"") usage "0";;
*) usage "1";;
esac

# Second argument selects the action.
case "$2" in
create) create;;
delete) delete;;
delete-deployment) delete_deployment "${3:-1}";;
delete-objects) delete_objects "${3:-1}";;
deploy-objects) deploy_objects "${3:-1}";;
get-annotations) get_cilium_annotations;;
svc-af-local|svc-affinity-local)
  for ctx in "$CTX1" "$CTX2"; do
    service_affinity_local "$ctx"
  done
  ;;
svc-af-remote|svc-affinity-remote)
  for ctx in "$CTX1" "$CTX2"; do
    service_affinity_remote "$ctx"
  done
  ;;
svc-af-none|svc-affinity-none)
  # BUG FIX: this branch previously called service_affinity_local, so
  # 'svc-affinity-none' behaved exactly like 'svc-affinity-local' and
  # service_affinity_none was never invoked.
  for ctx in "$CTX1" "$CTX2"; do
    service_affinity_none "$ctx"
  done
  ;;
svc-sh-default|svc-shared-default) service_shared_default "${3:-1}";;
svc-sh-false|svc-shared-false) service_shared_false "${3:-1}";;
svc-sh-true|svc-shared-true) service_shared_true "${3:-1}";;
status) status;;
test) access_test ;;
wait) wait_for_deployments ;;
*) usage "1" ;;
esac
|
||||
|
||||
# ----
|
||||
# vim: ts=2:sw=2:et:ai:sts=2
|
91
test/mesh-test/cluster1.yaml
Normal file
91
test/mesh-test/cluster1.yaml
Normal file
|
@ -0,0 +1,91 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: rebel-base
|
||||
annotations:
|
||||
service.cilium.io/global: "true"
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 80
|
||||
selector:
|
||||
name: rebel-base
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: rebel-base
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: rebel-base
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: rebel-base
|
||||
spec:
|
||||
containers:
|
||||
- name: rebel-base
|
||||
image: docker.io/nginx:1.15.8
|
||||
volumeMounts:
|
||||
- name: html
|
||||
mountPath: /usr/share/nginx/html/
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 80
|
||||
periodSeconds: 1
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 80
|
||||
volumes:
|
||||
- name: html
|
||||
configMap:
|
||||
name: rebel-base-response
|
||||
items:
|
||||
- key: message
|
||||
path: index.html
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: rebel-base-response
|
||||
data:
|
||||
message: "{\"Galaxy\": \"Alderaan\", \"Cluster\": \"Cluster-1\"}\n"
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: x-wing
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: x-wing
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: x-wing
|
||||
spec:
|
||||
containers:
|
||||
- name: x-wing-container
|
||||
image: docker.io/cilium/json-mock:1.2
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- curl
|
||||
- -sS
|
||||
- -o
|
||||
- /dev/null
|
||||
- localhost
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- curl
|
||||
- -sS
|
||||
- -o
|
||||
- /dev/null
|
||||
- localhost
|
91
test/mesh-test/cluster2.yaml
Normal file
91
test/mesh-test/cluster2.yaml
Normal file
|
@ -0,0 +1,91 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: rebel-base
|
||||
annotations:
|
||||
service.cilium.io/global: "true"
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 80
|
||||
selector:
|
||||
name: rebel-base
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: rebel-base
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: rebel-base
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: rebel-base
|
||||
spec:
|
||||
containers:
|
||||
- name: rebel-base
|
||||
image: docker.io/nginx:1.15.8
|
||||
volumeMounts:
|
||||
- name: html
|
||||
mountPath: /usr/share/nginx/html/
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 80
|
||||
periodSeconds: 1
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 80
|
||||
volumes:
|
||||
- name: html
|
||||
configMap:
|
||||
name: rebel-base-response
|
||||
items:
|
||||
- key: message
|
||||
path: index.html
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: rebel-base-response
|
||||
data:
|
||||
message: "{\"Galaxy\": \"Alderaan\", \"Cluster\": \"Cluster-2\"}\n"
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: x-wing
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: x-wing
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: x-wing
|
||||
spec:
|
||||
containers:
|
||||
- name: x-wing-container
|
||||
image: docker.io/cilium/json-mock:1.2
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- curl
|
||||
- -sS
|
||||
- -o
|
||||
- /dev/null
|
||||
- localhost
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- curl
|
||||
- -sS
|
||||
- -o
|
||||
- /dev/null
|
||||
- localhost
|
46
tmpl/cilium.yaml
Normal file
46
tmpl/cilium.yaml
Normal file
|
@ -0,0 +1,46 @@
|
|||
kubeProxyReplacement: "strict"
|
||||
k8sServiceHost: "{{ .master_node_ip }}"
|
||||
k8sServicePort: "6443"
|
||||
hostServices:
|
||||
enabled: false
|
||||
externalIPs:
|
||||
enabled: true
|
||||
nodePort:
|
||||
enabled: true
|
||||
hostPort:
|
||||
enabled: true
|
||||
image:
|
||||
pullPolicy: "IfNotPresent"
|
||||
ipam:
|
||||
mode: "kubernetes"
|
||||
tunnel: "{{ .tunnel }}"
|
||||
cluster:
|
||||
name: "{{ .cname }}"
|
||||
id: "{{ .cnum }}"
|
||||
ipv4NativeRoutingCIDR: "10.0.0.0/9"
|
||||
operator:
|
||||
replicas: 1
|
||||
# ---
|
||||
bgpControlPlane:
|
||||
enabled: {{ .bgp_control_plane_enabled }}
|
||||
ingressController:
|
||||
default: {{ .ingress_controller_default }}
|
||||
enabled: {{ .ingress_controller_enabled }}
|
||||
loadbalancerMode: "{{ .loadbalancer_mode }}"
|
||||
ipv4NativeRoutingCIDR: "10.0.0.0/9"
|
||||
gatewayAPI:
|
||||
enabled: {{ .gateway_api_enabled }}
|
||||
loadBalancer:
|
||||
l7:
|
||||
backend: envoy
|
||||
# ---
|
||||
hubble:
|
||||
relay:
|
||||
enabled: true
|
||||
ui:
|
||||
enabled: true
|
||||
tls:
|
||||
auto:
|
||||
enabled: true
|
||||
method: "helm"
|
||||
certValidityDuration: "1095"
|
7
tmpl/ippools.yaml
Normal file
7
tmpl/ippools.yaml
Normal file
|
@ -0,0 +1,7 @@
|
|||
apiVersion: "cilium.io/v2alpha1"
|
||||
kind: CiliumLoadBalancerIPPool
|
||||
metadata:
|
||||
  name: "cilium-pool"
|
||||
spec:
|
||||
cidrs:
|
||||
- cidr: "{{ .lb_pool_cdir }}"
|
52
tmpl/k3d-config.yaml
Normal file
52
tmpl/k3d-config.yaml
Normal file
|
@ -0,0 +1,52 @@
|
|||
apiVersion: k3d.io/v1alpha4
|
||||
kind: Simple
|
||||
metadata:
|
||||
name: {{ .cname }}
|
||||
servers: 1
|
||||
agents: 2
|
||||
image: docker.io/rancher/k3s:v1.25.7-k3s1
|
||||
kubeAPI:
|
||||
hostIP: {{ .host_ip }}
|
||||
hostPort: "6443"
|
||||
network: cilium
|
||||
token: CiliumTest1
|
||||
volumes:
|
||||
- volume: {{ .work_dir }}/bin/k3d-entrypoint-cilium.sh:/bin/k3d-entrypoint-cilium.sh
|
||||
nodeFilters:
|
||||
- all
|
||||
options:
|
||||
k3d:
|
||||
wait: true
|
||||
timeout: "6m0s"
|
||||
disableLoadbalancer: true
|
||||
disableImageVolume: false
|
||||
disableRollback: false
|
||||
k3s: # options passed on to K3s itself
|
||||
extraArgs:
|
||||
- arg: --tls-san=127.0.0.1
|
||||
nodeFilters:
|
||||
- server:*
|
||||
- arg: --disable=servicelb
|
||||
nodeFilters:
|
||||
- server:*
|
||||
- arg: --disable=traefik
|
||||
nodeFilters:
|
||||
- server:*
|
||||
- arg: --disable-network-policy
|
||||
nodeFilters:
|
||||
- server:*
|
||||
- arg: --flannel-backend=none
|
||||
nodeFilters:
|
||||
- server:*
|
||||
- arg: --disable=kube-proxy
|
||||
nodeFilters:
|
||||
- server:*
|
||||
- arg: --cluster-cidr={{ .cluster_subnet }}
|
||||
nodeFilters:
|
||||
- server:*
|
||||
- arg: --service-cidr={{ .service_subnet }}
|
||||
nodeFilters:
|
||||
- server:*
|
||||
kubeconfig:
|
||||
updateDefaultKubeconfig: true
|
||||
switchCurrentContext: true
|
15
tmpl/kind-config.yaml
Normal file
15
tmpl/kind-config.yaml
Normal file
|
@ -0,0 +1,15 @@
|
|||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
name: {{ .cname }}
|
||||
nodes:
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
- role: worker
|
||||
- role: worker
|
||||
networking:
|
||||
apiServerAddress: "{{ .host_ip }}"
|
||||
apiServerPort: 6443
|
||||
disableDefaultCNI: true
|
||||
kubeProxyMode: none
|
||||
podSubnet: "{{ .cluster_subnet }}"
|
||||
serviceSubnet: "{{ .service_subnet }}"
|
17
tmpl/metallb-crds.yaml
Normal file
17
tmpl/metallb-crds.yaml
Normal file
|
@ -0,0 +1,17 @@
|
|||
apiVersion: metallb.io/v1beta1
|
||||
kind: IPAddressPool
|
||||
metadata:
|
||||
name: cilium-pool
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
addresses:
|
||||
- {{ .lb_pool_range }}
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: L2Advertisement
|
||||
metadata:
|
||||
name: cilium-pool
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
ipAddressPools:
|
||||
- cilium-pool
|
Loading…
Reference in a new issue